hip_filename (string, length 5-84) | hip_content (string, length 79-9.69M) | cuda_filename (string, length 4-83) | cuda_content (string, length 19-9.69M)
---|---|---|---|
719866b276a4bdae511ef887a01d7cb9147319b6.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
This example shows how to fuse the activation's per-channel scale+bias+relu
into the wgrad mainloop.
Compared with the original fprop kernel, this example has two more vectors, one for
the scale and one for the bias. The length of the vectors equals the number of
activation channels. The kernel loads the vectors when the associated
activation channels are loaded in the mainloop. Between reading the
activations and scale/bias data from shared memory and calling the tensor core
instructions, scale+bias+relu is computed in the register file.
This example is customized for the Ampere 16816 fp16 tensor core instruction.
Changing to different data types or a different tensor core instruction requires
source code changes. See
include/cutlass/conv/threadblock/implicit_gemm_wgrad_fusion_multistage.h for more
technical details.
*/
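// Conceptually, for each activation element b of channel c, the fused mainloop applies
//
//     b_fused = max(0, b * scale[c] + bias[c])
//
// in the register file before the implicit GEMM consumes it (a simplified sketch; the names
// b_fused/scale/bias are illustrative). The host reference check further down in this file
// evaluates the same per-channel expression when validating the kernel output.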
#include <iostream>
#include <fstream>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/conv/kernel/default_conv2d_wgrad_fusion.h"
#include "cutlass/conv/device/implicit_gemm_convolution_fusion.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/device/convolution.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes the data types for the input and output tensors and the
// computation between elements
using ElementAccumulator = float; // Data type of accumulator
using ElementComputeEpilogue = float; // Data type of epilogue computation (alpha, beta)
using ElementInputA = cutlass::half_t; // Data type of elements in input tensor
using ElementInputB = cutlass::half_t; // Data type of elements in input tensor
using ElementInputScaleBias = cutlass::half_t; // Data type of elements in input scale and bias vectors
using ElementOutput = float; // Data type of elements in output tensor
using LayoutInputA = cutlass::layout::TensorNHWC;
using LayoutInputB = cutlass::layout::TensorNHWC;
using LayoutInputScaleBias = cutlass::layout::RowMajor;
using LayoutOutput = cutlass::layout::TensorNHWC;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; // Threadblock tile shape
// This code section describes tile size a warp will compute
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; // Warp tile shape
// This code section describes the size of MMA op
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; // TensorCore instruction shape
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
// Number of pipelines you want to use
constexpr int NumStages = 5;
// This code section describes whether the selected iterator algorithm is Analytic or Optimized
static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm = cutlass::conv::IteratorAlgorithm::kOptimized;
// This code section describes the epilogue part of the kernel, we use default value
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // Data type of output matrix.
128 / cutlass::sizeof_bits<ElementOutput>::value, // The number of elements per vectorized
// memory access. This becomes the vector width of
// math instructions in the epilogue too.
ElementAccumulator, // Data type of accumulator
ElementComputeEpilogue>; // Data type for alpha/beta in linear combination
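// Per output element, this linear combination epilogue evaluates D = alpha * accumulator + beta * C,
// with alpha/beta supplied through the --alpha/--beta options below (a sketch of the default
// LinearCombination behavior; the exact functor is defined in the CUTLASS epilogue headers).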
using Conv2dWgradFusionKernel = typename cutlass::conv::kernel::DefaultConv2dWgradFusion<
ElementInputA, LayoutInputA,
ElementInputB, LayoutInputB,
ElementInputScaleBias, LayoutInputScaleBias,
ElementOutput, LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
cutlass::arch::OpMultiplyAdd,
IteratorAlgorithm
>::Kernel;
using ImplicitGemmFusion = cutlass::conv::device::ImplicitGemmConvolutionFusion<Conv2dWgradFusionKernel>;
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
cutlass::Tensor4DCoord input_size;
cutlass::Tensor4DCoord filter_size;
cutlass::Tensor4DCoord padding;
cutlass::MatrixCoord conv_stride;
cutlass::MatrixCoord dilation;
bool reference_check;
bool measure_performance;
int iterations;
bool save_workspace;
ElementComputeEpilogue alpha;
ElementComputeEpilogue beta;
bool benchmark;
std::string tag;
Options():
help(false),
input_size(1, 32, 32, 32),
filter_size(32, 3, 3, 32),
padding(1, 1, 1, 1),
conv_stride(1, 1),
dilation(1, 1),
reference_check(true),
measure_performance(false),
iterations(20),
save_workspace(false),
alpha(1),
beta(0),
benchmark(false) { }
// Verify the problem size is compatible with the CUTLASS Convolution implementation.
bool valid() {
//
// CUTLASS attempts to load 128b vectors of cutlass::half_t (F16) elements. Consequently,
// all pointers, strides, and tensor extents must be divisible by 8 elements.
//
int const kAlignment = 8;
if ((input_size.c() % kAlignment) ||
(filter_size.n() % kAlignment)) {
// misaligned tensors
return false;
}
// Invalid padding
if ((padding.h() != filter_size.h() / 2) ||
(padding.w() != filter_size.w() / 2)) {
return false;
}
return true;
}
/// Updates input and filter sizes
void update(
cutlass::Tensor4DCoord input_size,
cutlass::Tensor4DCoord filter_size,
cutlass::MatrixCoord stride) {
this->input_size = input_size;
this->filter_size = filter_size;
conv_stride = stride;
padding.n() = filter_size.h() / 2;
padding.h() = filter_size.h() / 2;
padding.w() = filter_size.w() / 2;
padding.c() = filter_size.w() / 2;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
if (cmd.check_cmd_line_flag("ref-check")) {
reference_check = true;
}
if (cmd.check_cmd_line_flag("perf-check")) {
measure_performance = true;
}
if (cmd.check_cmd_line_flag("save-workspace")) {
save_workspace = true;
}
if (cmd.check_cmd_line_flag("benchmark")) {
benchmark = true;
}
cmd.get_cmd_line_argument("n", input_size.n());
cmd.get_cmd_line_argument("h", input_size.h());
cmd.get_cmd_line_argument("w", input_size.w());
cmd.get_cmd_line_argument("c", input_size.c());
cmd.get_cmd_line_argument("k", filter_size.n());
cmd.get_cmd_line_argument("r", filter_size.h());
cmd.get_cmd_line_argument("s", filter_size.w());
filter_size.c() = input_size.c();
cmd.get_cmd_line_argument("alpha", alpha);
cmd.get_cmd_line_argument("beta", beta);
cmd.get_cmd_line_argument("iterations", iterations);
cmd.get_cmd_line_argument("tag", tag);
if (filter_size.h() == 3 && filter_size.w() == 3) {
padding = {1, 1, 1, 1};
}
else {
filter_size.h() = 1;
filter_size.w() = 1;
padding = {0, 0, 0, 0};
}
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "26_ampere_wgrad_mainloop_fusion example\n\n"
<< " This example fuses scale+bias+relu of the activation into Ampere's\n"
<< " Tensor Core operators on F16 data types to compute\n"
<< " backward convolution on tensors of layout NHWC.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --n=<int> Input tensor extent N\n"
<< " --h=<int> Input tensor extent H\n"
<< " --w=<int> Input tensor extent W\n"
<< " --c=<int> Input tensor extent C\n"
<< " --k=<int> Filter extent K\n"
<< " --r=<int> Filter extent R\n"
<< " --s=<int> Filter extent S\n\n"
<< " --alpha=<float> Epilogue scalar alpha\n"
<< " --beta=<float> Epilogue scalar beta\n\n"
<< " --ref-check If set (true), reference check on the host is computed\n"
<< " --perf-check If set (true), performance is measured.\n"
<< " --benchmark If set (true), performance benchmarking on several layers and batch-size.\n"
<< " --iterations=<int> Number of profiling iterations to perform.\n"
<< " --save-workspace If set, workspace is written to a text file.\n"
<< " --tag=<string> String to replicate across the first column in the results table\n";
out << "\n\nExamples:\n\n"
<< "$ ./examples/26_ampere_wgrad_mainloop_fusion/26_ampere_wgrad_mainloop_fusion --n=32 --h=224 --w=224 --c=128 --k=256 --r=1 --s=1\n\n"
<< "$ ./examples/26_ampere_wgrad_mainloop_fusion/26_ampere_wgrad_mainloop_fusion --n=1 --h=224 --w=224 --c=32 --k=32 --r=3 --s=3 --ref-check\n\n";
return out;
}
/// Computes the output tensor size (NPQK)
cutlass::Tensor4DCoord output_size() const {
return cutlass::Tensor4DCoord(
input_size.n(),
(input_size.h() + padding.n() + padding.h() - filter_size.h()) / conv_stride.row() + 1,
(input_size.w() + padding.w() + padding.c() - filter_size.w()) / conv_stride.column() + 1,
filter_size.n());
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of multiply-adds = NPQK * CRS
int64_t fmas = output_size().product() * int64_t(filter_size.h() * filter_size.w() * filter_size.c());
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
cutlass::Status reference_check;
hipError_t error;
Result():
runtime_ms(0),
gflops(0),
status(cutlass::Status::kSuccess),
reference_check(cutlass::Status::kInvalid),
error(hipSuccess) { }
static std::ostream & print_header(std::ostream &out, Options const &options) {
if (!options.tag.empty()) {
out << "Name,";
}
out << "Layer,N,H,W,C,K,R,S,Stride_H,Stride_W,Runtime,GFLOPs";
return out;
}
std::ostream & print(std::ostream &out, int idx, Options const &options) {
if (!options.tag.empty()) {
out << options.tag << ",";
}
out
<< "conv_" << idx << ","
<< options.input_size.n() << ","
<< options.input_size.h() << ","
<< options.input_size.w() << ","
<< options.input_size.c() << ","
<< options.filter_size.n() << ","
<< options.filter_size.h() << ","
<< options.filter_size.w() << ","
<< options.conv_stride.row() << ","
<< options.conv_stride.column() << ","
<< runtime_ms << ","
<< gflops;
return out;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Runs one benchmark
Result profile_convolution(Options const &options) {
Result result;
//
// Allocate host-device tensors using the CUTLASS Utilities.
//
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(options.output_size());
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(options.input_size);
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_transformed_b(options.input_size);
cutlass::HostTensor<ElementInputScaleBias, LayoutInputScaleBias>
tensor_b_scale({1, options.input_size.c()});
cutlass::HostTensor<ElementInputScaleBias, LayoutInputScaleBias>
tensor_b_bias({1, options.input_size.c()});
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(options.filter_size);
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(options.filter_size);
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(options.filter_size);
//
// Initialize tensors
//
// Fill tensor A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(3),
ElementInputA(-4),
0);
// Fill tensor B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(7),
ElementInputB(-8),
0);
// Fill scale vector for tensor B on host with uniform-distribution random
// data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b_scale.host_view(),
1,
ElementInputA(3),
ElementInputA(-4),
0);
// Fill bias vector for tensor B on host with uniform-distribution random
// data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b_bias.host_view(),
1,
ElementInputA(3),
ElementInputA(-4),
0);
// Fill tensor C on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c.host_view(),
1,
ElementOutput(7),
ElementOutput(-8),
0);
// Fill tensor D on host with zeros
cutlass::reference::host::TensorFill(
tensor_d.host_view());
// Fill tensor D for reference on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_d.host_view());
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_b_scale.sync_device();
tensor_b_bias.sync_device();
tensor_c.sync_device();
tensor_d.sync_device();
tensor_ref_d.sync_device();
//
// Define arguments for CUTLASS Convolution
//
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation;
// Split the K dimension into 1 partition
int split_k_slices = 1;
// Construct Conv2dProblemSize with user defined output size
cutlass::conv::Conv2dProblemSize problem_size(
options.input_size,
options.filter_size,
options.padding,
options.conv_stride,
options.dilation,
options.output_size(),
mode,
split_k_slices
);
typename ImplicitGemmFusion::Arguments arguments{
problem_size,
tensor_a.device_ref(),
tensor_b.device_ref(),
tensor_b_scale.device_ref(),
tensor_b_bias.device_ref(),
tensor_c.device_ref(),
tensor_d.device_ref(),
{options.alpha, options.beta},
};
//
// Initialize CUTLASS Convolution
//
ImplicitGemmFusion implicit_gemm_fusion_op;
size_t workspace_size = implicit_gemm_fusion_op.get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
result.status = implicit_gemm_fusion_op.can_implement(arguments);
CUTLASS_CHECK(result.status);
result.status = implicit_gemm_fusion_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(result.status);
//
// Launch initialized CUTLASS kernel
//
result.status = implicit_gemm_fusion_op();
CUTLASS_CHECK(result.status);
//
// Optional reference check
//
if (options.reference_check) {
std::cout << "Verification on device...\n";
// Compute scale + bias + relu in host code
for (int n = 0; n < options.input_size.n(); ++n) {
for (int h = 0; h < options.input_size.h(); ++h) {
for (int w = 0; w < options.input_size.w(); ++w) {
for (int c = 0; c < options.input_size.c(); ++c) {
tensor_transformed_b.at({n, h, w, c}) = ::max(
ElementOutput(0), ElementOutput(tensor_b.at({n, h, w, c}) *
tensor_b_scale.at({0, c}) +
tensor_b_bias.at({0, c})));
}
}
}
}
tensor_transformed_b.sync_device();
// Compute with reference implementation
cutlass::reference::device::Conv2dWgrad<
ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementAccumulator,
cutlass::NumericConverter<ElementOutput, ElementComputeEpilogue>
>(
problem_size,
tensor_a.device_ref(),
tensor_transformed_b.device_ref(),
tensor_c.device_ref(),
tensor_ref_d.device_ref(),
options.alpha,
options.beta
);
// Check if output from CUTLASS kernel and reference kernel are equal or not
tensor_d.sync_host();
tensor_ref_d.sync_host();
bool passed = cutlass::reference::host::TensorEquals(
tensor_d.host_view(),
tensor_ref_d.host_view());
if (!passed) {
result.reference_check = cutlass::Status::kErrorInternal;
std::cout << "ERROR - results miscompared.\n";
}
else {
result.reference_check = cutlass::Status::kSuccess;
std::cout << "Passed.\n";
}
}
else {
result.reference_check = cutlass::Status::kInvalid;
}
if (options.save_workspace) {
std::stringstream ss;
ss << "26_ampere_wgrad_mainloop_fusion_"
<< options.input_size.n() << "x" << options.input_size.h() << "x" << options.input_size.w() << "x" << options.input_size.c()
<< "_"
<< options.filter_size.n() << "x" << options.filter_size.h() << "x" << options.filter_size.w() << "x" << options.filter_size.c()
<< ".dat";
std::ofstream output_workspace(ss.str());
output_workspace
<< "Input = \n" << tensor_a.host_view() << "\n\n"
<< "Filters = \n" << tensor_b.host_view() << "\n\n";
if (options.reference_check) {
output_workspace << "Reference = \n" << tensor_ref_d.host_view() << "\n\n";
}
output_workspace << "Computed = \n" << tensor_d.host_view() << std::endl;
std::cout << "Results written to '" << ss.str() << "'." << std::endl;
}
//
// Performance measurement
//
if (options.measure_performance) {
hipEvent_t events[2];
for (auto & event : events) {
result.error = hipEventCreate(&event);
if (result.error != hipSuccess) {
std::cerr << "hipEventCreate() failed: " << hipGetErrorString(result.error) << std::endl;
return result;
}
}
// Record an event at the start of a series of convolution operations.
result.error = hipEventRecord(events[0]);
if (result.error != hipSuccess) {
std::cerr << "hipEventRecord() failed: " << hipGetErrorString(result.error) << std::endl;
return result;
}
// Launch a sequence of implicit GEMM operations on the device
for (int iteration = 0; iteration < options.iterations; ++iteration) {
result.status = implicit_gemm_fusion_op();
CUTLASS_CHECK(result.status);
}
// Record an event when the convolutions have been launched.
result.error = hipEventRecord(events[1]);
if (result.error != hipSuccess) {
std::cerr << "hipEventRecord() failed: " << hipGetErrorString(result.error) << std::endl;
return result;
}
// Wait for work on the device to complete.
result.error = hipEventSynchronize(events[1]);
if (result.error != hipSuccess) {
std::cerr << "hipEventSynchronize() failed: " << hipGetErrorString(result.error) << std::endl;
return result;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = hipEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != hipSuccess) {
std::cerr << "cudaEventElapsed() failed: " << hipGetErrorString(result.error) << std::endl;
return result;
}
// Print average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
// Cleanup
for (auto event : events) {
(void)hipEventDestroy(event);
}
}
return result;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char const **args) {
bool notSupported = false;
// Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0.
//
// CUTLASS must be compiled with the CUDA 11 Toolkit to run these Conv2d examples.
if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
hipDeviceProp_t props;
CUDA_CHECK(hipGetDeviceProperties(&props, 0));
if (!(props.major == 8 && props.minor == 0)) {
std::cerr << "This test must run on SM80 A100.\n";
notSupported = true;
}
if (notSupported) {
return 0;
}
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
if (options.benchmark) {
// Benchmark several layers
int batch_sizes[] = {34, 408};
struct Benchmark {
int h, w, c, k, r, s, stride_h, stride_w;
} layers[] = {
{56, 56, 64, 256, 1, 1, 1, 1},
{56, 56, 64, 64, 1, 1, 1, 1},
{56, 56, 64, 64, 3, 3, 1, 1},
{56, 56, 256, 64, 1, 1, 1, 1},
{56, 56, 256, 512, 1, 1, 2, 2},
{56, 56, 256, 128, 1, 1, 1, 1},
{56, 56, 128, 128, 3, 3, 2, 2},
{28, 28, 128, 512, 1, 1, 1, 1},
{28, 28, 512, 128, 1, 1, 1, 1},
{28, 28, 128, 128, 3, 3, 1, 1},
{28, 28, 512, 1024, 1, 1, 2, 2},
{28, 28, 512, 256, 1, 1, 1, 1},
{28, 28, 256, 256, 3, 3, 2, 2},
{14, 14, 256, 1024, 1, 1, 1, 1},
{14, 14, 1024, 256, 1, 1, 1, 1},
{14, 14, 256, 256, 3, 3, 1, 1},
{14, 14, 1024, 2048, 1, 1, 2, 2},
{14, 14, 1024, 512, 1, 1, 1, 1},
{14, 14, 512, 512, 3, 3, 2, 2},
{ 7, 7, 512, 2048, 1, 1, 1, 1},
{ 7, 7, 2048, 512, 1, 1, 1, 1},
{ 7, 7, 512, 512, 3, 3, 1, 1},
};
Result::print_header(std::cout, options) << std::endl;
int idx = 1;
for (auto const &layer : layers) {
for (auto N : batch_sizes) {
options.update({N, layer.h, layer.w, layer.c},
{layer.k, layer.r, layer.s, layer.c},
{layer.stride_h, layer.stride_w});
Result result = profile_convolution(options);
result.print(std::cout, idx, options) << std::endl;
}
++idx;
}
}
else {
// Execute one problem size
if (!options.valid()) {
std::cerr << "Invalid problem." << std::endl;
return -1;
}
Result result = profile_convolution(options);
Result::print_header(std::cout, options) << std::endl;
result.print(std::cout, 1, options) << std::endl;
}
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| 719866b276a4bdae511ef887a01d7cb9147319b6.cu | /***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
This example shows how to fuse the activation's per-channel scale+bias+relu
into the wgrad mainloop.
Compared with the original fprop kernel, this example has two more vectors, one for
the scale and one for the bias. The length of the vectors equals the number of
activation channels. The kernel loads the vectors when the associated
activation channels are loaded in the mainloop. Between reading the
activations and scale/bias data from shared memory and calling the tensor core
instructions, scale+bias+relu is computed in the register file.
This example is customized for the Ampere 16816 fp16 tensor core instruction.
Changing to different data types or a different tensor core instruction requires
source code changes. See
include/cutlass/conv/threadblock/implicit_gemm_wgrad_fusion_multistage.h for more
technical details.
*/
#include <iostream>
#include <fstream>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/conv/kernel/default_conv2d_wgrad_fusion.h"
#include "cutlass/conv/device/implicit_gemm_convolution_fusion.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/device/convolution.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes the data types for the input and output tensors and the
// computation between elements
using ElementAccumulator = float; // Data type of accumulator
using ElementComputeEpilogue = float; // Data type of epilogue computation (alpha, beta)
using ElementInputA = cutlass::half_t; // Data type of elements in input tensor
using ElementInputB = cutlass::half_t; // Data type of elements in input tensor
using ElementInputScaleBias = cutlass::half_t; // Data type of elements in input scale and bias vectors
using ElementOutput = float; // Data type of elements in output tensor
using LayoutInputA = cutlass::layout::TensorNHWC;
using LayoutInputB = cutlass::layout::TensorNHWC;
using LayoutInputScaleBias = cutlass::layout::RowMajor;
using LayoutOutput = cutlass::layout::TensorNHWC;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; // Threadblock tile shape
// This code section describes tile size a warp will compute
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; // Warp tile shape
// This code section describes the size of MMA op
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; // TensorCore instruction shape
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
// Number of pipelines you want to use
constexpr int NumStages = 5;
// This code section describes whether the selected iterator algorithm is Analytic or Optimized
static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm = cutlass::conv::IteratorAlgorithm::kOptimized;
// This code section describes the epilogue part of the kernel, we use default value
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // Data type of output matrix.
128 / cutlass::sizeof_bits<ElementOutput>::value, // The number of elements per vectorized
// memory access. This becomes the vector width of
// math instructions in the epilogue too.
ElementAccumulator, // Data type of accumulator
ElementComputeEpilogue>; // Data type for alpha/beta in linear combination
using Conv2dWgradFusionKernel = typename cutlass::conv::kernel::DefaultConv2dWgradFusion<
ElementInputA, LayoutInputA,
ElementInputB, LayoutInputB,
ElementInputScaleBias, LayoutInputScaleBias,
ElementOutput, LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
cutlass::arch::OpMultiplyAdd,
IteratorAlgorithm
>::Kernel;
using ImplicitGemmFusion = cutlass::conv::device::ImplicitGemmConvolutionFusion<Conv2dWgradFusionKernel>;
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
cutlass::Tensor4DCoord input_size;
cutlass::Tensor4DCoord filter_size;
cutlass::Tensor4DCoord padding;
cutlass::MatrixCoord conv_stride;
cutlass::MatrixCoord dilation;
bool reference_check;
bool measure_performance;
int iterations;
bool save_workspace;
ElementComputeEpilogue alpha;
ElementComputeEpilogue beta;
bool benchmark;
std::string tag;
Options():
help(false),
input_size(1, 32, 32, 32),
filter_size(32, 3, 3, 32),
padding(1, 1, 1, 1),
conv_stride(1, 1),
dilation(1, 1),
reference_check(true),
measure_performance(false),
iterations(20),
save_workspace(false),
alpha(1),
beta(0),
benchmark(false) { }
// Verify the problem size is compatible with the CUTLASS Convolution implementation.
bool valid() {
//
// CUTLASS attempts to load 128b vectors of cutlass::half_t (F16) elements. Consequently,
// all pointers, strides, and tensor extents must be divisible by 8 elements.
//
int const kAlignment = 8;
if ((input_size.c() % kAlignment) ||
(filter_size.n() % kAlignment)) {
// misaligned tensors
return false;
}
// Invalid padding
if ((padding.h() != filter_size.h() / 2) ||
(padding.w() != filter_size.w() / 2)) {
return false;
}
return true;
}
/// Updates input and filter sizes
void update(
cutlass::Tensor4DCoord input_size,
cutlass::Tensor4DCoord filter_size,
cutlass::MatrixCoord stride) {
this->input_size = input_size;
this->filter_size = filter_size;
conv_stride = stride;
padding.n() = filter_size.h() / 2;
padding.h() = filter_size.h() / 2;
padding.w() = filter_size.w() / 2;
padding.c() = filter_size.w() / 2;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
if (cmd.check_cmd_line_flag("ref-check")) {
reference_check = true;
}
if (cmd.check_cmd_line_flag("perf-check")) {
measure_performance = true;
}
if (cmd.check_cmd_line_flag("save-workspace")) {
save_workspace = true;
}
if (cmd.check_cmd_line_flag("benchmark")) {
benchmark = true;
}
cmd.get_cmd_line_argument("n", input_size.n());
cmd.get_cmd_line_argument("h", input_size.h());
cmd.get_cmd_line_argument("w", input_size.w());
cmd.get_cmd_line_argument("c", input_size.c());
cmd.get_cmd_line_argument("k", filter_size.n());
cmd.get_cmd_line_argument("r", filter_size.h());
cmd.get_cmd_line_argument("s", filter_size.w());
filter_size.c() = input_size.c();
cmd.get_cmd_line_argument("alpha", alpha);
cmd.get_cmd_line_argument("beta", beta);
cmd.get_cmd_line_argument("iterations", iterations);
cmd.get_cmd_line_argument("tag", tag);
if (filter_size.h() == 3 && filter_size.w() == 3) {
padding = {1, 1, 1, 1};
}
else {
filter_size.h() = 1;
filter_size.w() = 1;
padding = {0, 0, 0, 0};
}
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "26_ampere_wgrad_mainloop_fusion example\n\n"
<< " This example fuses scale+bias+relu of the activation into Ampere's\n"
<< " Tensor Core operators on F16 data types to compute\n"
<< " backward convolution on tensors of layout NHWC.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --n=<int> Input tensor extent N\n"
<< " --h=<int> Input tensor extent H\n"
<< " --w=<int> Input tensor extent W\n"
<< " --c=<int> Input tensor extent C\n"
<< " --k=<int> Filter extent K\n"
<< " --r=<int> Filter extent R\n"
<< " --s=<int> Filter extent S\n\n"
<< " --alpha=<float> Epilogue scalar alpha\n"
<< " --beta=<float> Epilogue scalar beta\n\n"
<< " --ref-check If set (true), reference check on the host is computed\n"
<< " --perf-check If set (true), performance is measured.\n"
<< " --benchmark If set (true), performance benchmarking on several layers and batch-size.\n"
<< " --iterations=<int> Number of profiling iterations to perform.\n"
<< " --save-workspace If set, workspace is written to a text file.\n"
<< " --tag=<string> String to replicate across the first column in the results table\n";
out << "\n\nExamples:\n\n"
<< "$ ./examples/26_ampere_wgrad_mainloop_fusion/26_ampere_wgrad_mainloop_fusion --n=32 --h=224 --w=224 --c=128 --k=256 --r=1 --s=1\n\n"
<< "$ ./examples/26_ampere_wgrad_mainloop_fusion/26_ampere_wgrad_mainloop_fusion --n=1 --h=224 --w=224 --c=32 --k=32 --r=3 --s=3 --ref-check\n\n";
return out;
}
/// Computes the output tensor size (NPQK)
cutlass::Tensor4DCoord output_size() const {
return cutlass::Tensor4DCoord(
input_size.n(),
(input_size.h() + padding.n() + padding.h() - filter_size.h()) / conv_stride.row() + 1,
(input_size.w() + padding.w() + padding.c() - filter_size.w()) / conv_stride.column() + 1,
filter_size.n());
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of multiply-adds = NPQK * CRS
int64_t fmas = output_size().product() * int64_t(filter_size.h() * filter_size.w() * filter_size.c());
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
cutlass::Status reference_check;
cudaError_t error;
Result():
runtime_ms(0),
gflops(0),
status(cutlass::Status::kSuccess),
reference_check(cutlass::Status::kInvalid),
error(cudaSuccess) { }
static std::ostream & print_header(std::ostream &out, Options const &options) {
if (!options.tag.empty()) {
out << "Name,";
}
out << "Layer,N,H,W,C,K,R,S,Stride_H,Stride_W,Runtime,GFLOPs";
return out;
}
std::ostream & print(std::ostream &out, int idx, Options const &options) {
if (!options.tag.empty()) {
out << options.tag << ",";
}
out
<< "conv_" << idx << ","
<< options.input_size.n() << ","
<< options.input_size.h() << ","
<< options.input_size.w() << ","
<< options.input_size.c() << ","
<< options.filter_size.n() << ","
<< options.filter_size.h() << ","
<< options.filter_size.w() << ","
<< options.conv_stride.row() << ","
<< options.conv_stride.column() << ","
<< runtime_ms << ","
<< gflops;
return out;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Runs one benchmark
Result profile_convolution(Options const &options) {
Result result;
//
// Allocate host-device tensors using the CUTLASS Utilities.
//
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(options.output_size());
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(options.input_size);
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_transformed_b(options.input_size);
cutlass::HostTensor<ElementInputScaleBias, LayoutInputScaleBias>
tensor_b_scale({1, options.input_size.c()});
cutlass::HostTensor<ElementInputScaleBias, LayoutInputScaleBias>
tensor_b_bias({1, options.input_size.c()});
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(options.filter_size);
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(options.filter_size);
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(options.filter_size);
//
// Initialize tensors
//
// Fill tensor A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(3),
ElementInputA(-4),
0);
// Fill tensor B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(7),
ElementInputB(-8),
0);
// Fill scale vector for tensor B on host with uniform-distribution random
// data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b_scale.host_view(),
1,
ElementInputA(3),
ElementInputA(-4),
0);
// Fill bias vector for tensor B on host with uniform-distribution random
// data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b_bias.host_view(),
1,
ElementInputA(3),
ElementInputA(-4),
0);
// Fill tensor C on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c.host_view(),
1,
ElementOutput(7),
ElementOutput(-8),
0);
// Fill tensor D on host with zeros
cutlass::reference::host::TensorFill(
tensor_d.host_view());
// Fill tensor D for reference on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_d.host_view());
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_b_scale.sync_device();
tensor_b_bias.sync_device();
tensor_c.sync_device();
tensor_d.sync_device();
tensor_ref_d.sync_device();
//
// Define arguments for CUTLASS Convolution
//
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation;
// Split the K dimension into 1 partition
int split_k_slices = 1;
// Construct Conv2dProblemSize with user defined output size
cutlass::conv::Conv2dProblemSize problem_size(
options.input_size,
options.filter_size,
options.padding,
options.conv_stride,
options.dilation,
options.output_size(),
mode,
split_k_slices
);
typename ImplicitGemmFusion::Arguments arguments{
problem_size,
tensor_a.device_ref(),
tensor_b.device_ref(),
tensor_b_scale.device_ref(),
tensor_b_bias.device_ref(),
tensor_c.device_ref(),
tensor_d.device_ref(),
{options.alpha, options.beta},
};
//
// Initialize CUTLASS Convolution
//
ImplicitGemmFusion implicit_gemm_fusion_op;
size_t workspace_size = implicit_gemm_fusion_op.get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
result.status = implicit_gemm_fusion_op.can_implement(arguments);
CUTLASS_CHECK(result.status);
result.status = implicit_gemm_fusion_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(result.status);
//
// Launch initialized CUTLASS kernel
//
result.status = implicit_gemm_fusion_op();
CUTLASS_CHECK(result.status);
//
// Optional reference check
//
if (options.reference_check) {
std::cout << "Verification on device...\n";
// Compute scale + bias + relu in host code
for (int n = 0; n < options.input_size.n(); ++n) {
for (int h = 0; h < options.input_size.h(); ++h) {
for (int w = 0; w < options.input_size.w(); ++w) {
for (int c = 0; c < options.input_size.c(); ++c) {
tensor_transformed_b.at({n, h, w, c}) = std::max(
ElementOutput(0), ElementOutput(tensor_b.at({n, h, w, c}) *
tensor_b_scale.at({0, c}) +
tensor_b_bias.at({0, c})));
}
}
}
}
tensor_transformed_b.sync_device();
// Compute with reference implementation
cutlass::reference::device::Conv2dWgrad<
ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementAccumulator,
cutlass::NumericConverter<ElementOutput, ElementComputeEpilogue>
>(
problem_size,
tensor_a.device_ref(),
tensor_transformed_b.device_ref(),
tensor_c.device_ref(),
tensor_ref_d.device_ref(),
options.alpha,
options.beta
);
// Check if output from CUTLASS kernel and reference kernel are equal or not
tensor_d.sync_host();
tensor_ref_d.sync_host();
bool passed = cutlass::reference::host::TensorEquals(
tensor_d.host_view(),
tensor_ref_d.host_view());
if (!passed) {
result.reference_check = cutlass::Status::kErrorInternal;
std::cout << "ERROR - results miscompared.\n";
}
else {
result.reference_check = cutlass::Status::kSuccess;
std::cout << "Passed.\n";
}
}
else {
result.reference_check = cutlass::Status::kInvalid;
}
if (options.save_workspace) {
std::stringstream ss;
ss << "26_ampere_wgrad_mainloop_fusion_"
<< options.input_size.n() << "x" << options.input_size.h() << "x" << options.input_size.w() << "x" << options.input_size.c()
<< "_"
<< options.filter_size.n() << "x" << options.filter_size.h() << "x" << options.filter_size.w() << "x" << options.filter_size.c()
<< ".dat";
std::ofstream output_workspace(ss.str());
output_workspace
<< "Input = \n" << tensor_a.host_view() << "\n\n"
<< "Filters = \n" << tensor_b.host_view() << "\n\n";
if (options.reference_check) {
output_workspace << "Reference = \n" << tensor_ref_d.host_view() << "\n\n";
}
output_workspace << "Computed = \n" << tensor_d.host_view() << std::endl;
std::cout << "Results written to '" << ss.str() << "'." << std::endl;
}
//
// Performance measurement
//
if (options.measure_performance) {
cudaEvent_t events[2];
for (auto & event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
}
// Record an event at the start of a series of convolution operations.
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Launch a sequence of implicit GEMM operations on the device
for (int iteration = 0; iteration < options.iterations; ++iteration) {
result.status = implicit_gemm_fusion_op();
CUTLASS_CHECK(result.status);
}
// Record an event when the convolutions have been launched.
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Wait for work on the device to complete.
result.error = cudaEventSynchronize(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Print average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
// Cleanup
for (auto event : events) {
(void)cudaEventDestroy(event);
}
}
return result;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char const **args) {
bool notSupported = false;
// Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0.
//
// CUTLASS must be compiled with the CUDA 11 Toolkit to run these Conv2d examples.
if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
CUDA_CHECK(cudaGetDeviceProperties(&props, 0));
if (!(props.major == 8 && props.minor == 0)) {
std::cerr << "This test must run on SM80 A100.\n";
notSupported = true;
}
if (notSupported) {
return 0;
}
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
if (options.benchmark) {
// Benchmark several layers
int batch_sizes[] = {34, 408};
struct Benchmark {
int h, w, c, k, r, s, stride_h, stride_w;
} layers[] = {
{56, 56, 64, 256, 1, 1, 1, 1},
{56, 56, 64, 64, 1, 1, 1, 1},
{56, 56, 64, 64, 3, 3, 1, 1},
{56, 56, 256, 64, 1, 1, 1, 1},
{56, 56, 256, 512, 1, 1, 2, 2},
{56, 56, 256, 128, 1, 1, 1, 1},
{56, 56, 128, 128, 3, 3, 2, 2},
{28, 28, 128, 512, 1, 1, 1, 1},
{28, 28, 512, 128, 1, 1, 1, 1},
{28, 28, 128, 128, 3, 3, 1, 1},
{28, 28, 512, 1024, 1, 1, 2, 2},
{28, 28, 512, 256, 1, 1, 1, 1},
{28, 28, 256, 256, 3, 3, 2, 2},
{14, 14, 256, 1024, 1, 1, 1, 1},
{14, 14, 1024, 256, 1, 1, 1, 1},
{14, 14, 256, 256, 3, 3, 1, 1},
{14, 14, 1024, 2048, 1, 1, 2, 2},
{14, 14, 1024, 512, 1, 1, 1, 1},
{14, 14, 512, 512, 3, 3, 2, 2},
{ 7, 7, 512, 2048, 1, 1, 1, 1},
{ 7, 7, 2048, 512, 1, 1, 1, 1},
{ 7, 7, 512, 512, 3, 3, 1, 1},
};
Result::print_header(std::cout, options) << std::endl;
int idx = 1;
for (auto const &layer : layers) {
for (auto N : batch_sizes) {
options.update({N, layer.h, layer.w, layer.c},
{layer.k, layer.r, layer.s, layer.c},
{layer.stride_h, layer.stride_w});
Result result = profile_convolution(options);
result.print(std::cout, idx, options) << std::endl;
}
++idx;
}
}
else {
// Execute one problem size
if (!options.valid()) {
std::cerr << "Invalid problem." << std::endl;
return -1;
}
Result result = profile_convolution(options);
Result::print_header(std::cout, options) << std::endl;
result.print(std::cout, 1, options) << std::endl;
}
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
|
e09ed747a56bba44128c9ac65086371a9aa6b2bd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////
//
// ****************************
// *** MC-GPU , version 1.3 ***
// ****************************
//
//! Definition of the CUDA GPU kernel for the simulation of x-ray tracks in a voxelized geometry.
//! This kernel has been optimized to yield good performance on the GPU but can still be
//! compiled on the CPU without problems. All the CUDA-specific commands are enclosed in
//! pre-processor directives that are skipped if the parameter "USING_CUDA" is not defined
//! at compilation time.
//
// ** DISCLAIMER **
//
// This software and documentation (the "Software") were developed at the Food and
// Drug Administration (FDA) by employees of the Federal Government in the course
// of their official duties. Pursuant to Title 17, Section 105 of the United States
// Code, this work is not subject to copyright protection and is in the public
// domain. Permission is hereby granted, free of charge, to any person obtaining a
// copy of the Software, to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish, distribute,
// sublicense, or sell copies of the Software or derivatives, and to permit persons
// to whom the Software is furnished to do so. FDA assumes no responsibility
// whatsoever for use by other parties of the Software, its source code,
// documentation or compiled executables, and makes no guarantees, expressed or
// implied, about its quality, reliability, or any other characteristic. Further,
// use of this code in no way implies endorsement by the FDA or confers any
// advantage in regulatory decisions. Although this software can be redistributed
// and/or modified freely, we ask that any derivative works bear some notice that
// they are derived from it, and any modified versions bear some notice that they
// have been modified.
//
//
//! @file MC-GPU_kernel_v1.3.cu
//! @author Andreu Badal ([email protected])
//! @date 2012/12/12
// -- Original code started on: 2009/04/14
//
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//! Initialize the image array, ie, set all pixels to zero
//! Essentially, this function has the same effect as the command:
//! "cutilSafeCall(hipMemcpy(image_device, image, image_bytes, hipMemcpyHostToDevice))";
//!
//! CUDA performs some initialization work the first time a GPU kernel is called.
//! Therefore, calling a short kernel before the real particle tracking is performed
//! may improve the accuracy of the timing measurements in the relevant kernel.
//!
//! @param[in,out] image Pointer to the image array.
//! @param[in] pixels_per_image Number of pixels in the image (ie, elements in the array).
////////////////////////////////////////////////////////////////////////////////
#ifdef USING_CUDA
__global__
void init_image_array_GPU(unsigned long long int* image, int pixels_per_image)
{
int my_pixel = threadIdx.x + blockIdx.x*blockDim.x;
if (my_pixel < pixels_per_image)
{
// -- Set the current pixel to 0 and return, avoiding overflow when more threads than pixels are used:
image[my_pixel] = (unsigned long long int)(0); // Initialize non-scatter image
my_pixel += pixels_per_image; // (advance to next image)
image[my_pixel] = (unsigned long long int)(0); // Initialize Compton image
my_pixel += pixels_per_image; // (advance to next image)
image[my_pixel] = (unsigned long long int)(0); // Initialize Rayleigh image
my_pixel += pixels_per_image; // (advance to next image)
image[my_pixel] = (unsigned long long int)(0); // Initialize multi-scatter image
}
}
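// A minimal host-side launch sketch (hypothetical: the block size of 128 threads and the
// device pointer name image_device are illustrative; the real launch configuration is
// chosen elsewhere in MC-GPU):
//
//   int threads = 128;
//   int blocks = (pixels_per_image + threads - 1) / threads; // one thread per pixel; each thread clears all 4 images
//   hipLaunchKernelGGL(init_image_array_GPU, dim3(blocks), dim3(threads), 0, 0, image_device, pixels_per_image);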
// ////////////////////////////////////////////////////////////////////////////////
// //! Initialize the dose deposition array, ie, set all voxel doses to zero
// //!
// //! @param[in,out] dose Pointer to the dose mean and sigma arrays.
// //! @param[in] num_voxels_dose Number of voxels in the dose ROI (ie, elements in the arrays).
// ////////////////////////////////////////////////////////////////////////////////
// __global__
// void init_dose_array_GPU(ulonglong2* voxels_Edep, int num_voxels_dose)
// {
// int my_voxel = threadIdx.x + blockIdx.x*blockDim.x;
// register ulonglong2 ulonglong2_zero;
// ulonglong2_zero.x = ulonglong2_zero.y = (unsigned long long int) 0;
// if (my_voxel < num_voxels_dose)
// {
// dose[my_voxel] = ulonglong2_zero; // Set the current voxel to (0,0) and return, avoiding overflow
// }
// }
#endif
////////////////////////////////////////////////////////////////////////////////
//! Main function to simulate x-ray tracks inside a voxelized geometry.
//! Secondary electrons are not simulated (in photoelectric and Compton
//! events the energy is locally deposited).
//!
//! The following global variables, in the GPU __constant__ memory are used:
//! voxel_data_CONST,
//! source_energy_data_CONST,
//! detector_data_CONST,
//! mfp_table_data_CONST.
//!
//! @param[in] history_batch Particle batch number (only used in the CPU version when CUDA is disabled!, the GPU uses the built-in variable threadIdx)
//! @param[in] num_p Projection number in the CT simulation. This variable defines a specific angle and the corresponding source and detector will be used.
//! @param[in] histories_per_thread Number of histories to simulate for each call to this function (ie, for GPU thread).
//! @param[in] seed_input Random number generator seed (the same seed is used to initialize the two MLCGs of RANECU).
//! @param[in] voxel_mat_dens Pointer to the voxel densities and material vector (the voxelized geometry), stored in GPU global memory.
//! @param[in] mfp_Woodcock_table Two parameter table for the linear interpolation of the Woodcock mean free path (MFP) (stored in GPU global memory).
//! @param[in] mfp_table_a First element for the linear interpolation of the interaction mean free paths (stored in GPU global memory).
//! @param[in] mfp_table_b Second element for the linear interpolation of the interaction mean free paths (stored in GPU global memory).
//! @param[in] rayleigh_table Pointer to the table with the data required by the Rayleigh interaction sampling, stored in GPU global memory.
//! @param[in] compton_table Pointer to the table with the data required by the Compton interaction sampling, stored in GPU global memory.
//! @param[in,out] image Pointer to the image vector in the GPU global memory.
//! @param[in,out] dose Pointer to the array containing the 3D voxel dose (and its uncertainty) in the GPU global memory.
////////////////////////////////////////////////////////////////////////////////
#ifdef USING_CUDA
__global__ void track_particles(int histories_per_thread,
int num_p, // For a CT simulation: allocate space for up to MAX_NUM_PROJECTIONS projections.
int seed_input,
unsigned long long int* image,
ulonglong2* voxels_Edep,
float2* voxel_mat_dens,
float2* mfp_Woodcock_table,
float3* mfp_table_a,
float3* mfp_table_b,
struct rayleigh_struct* rayleigh_table,
struct compton_struct* compton_table,
struct detector_struct* detector_data_array,
struct source_struct* source_data_array,
ulonglong2* materials_dose)
#else
void track_particles(int history_batch, // This variable is not required in the GPU, it uses the thread ID
int histories_per_thread,
int num_p,
int seed_input,
unsigned long long int* image,
ulonglong2* voxels_Edep,
float2* voxel_mat_dens,
float2* mfp_Woodcock_table,
float3* mfp_table_a,
float3* mfp_table_b,
struct rayleigh_struct* rayleigh_table,
struct compton_struct* compton_table,
struct detector_struct* detector_data_array,
struct source_struct* source_data_array,
ulonglong2* materials_dose)
#endif
{
// -- Declare the track state variables:
float3 position, direction;
float energy, step, prob, randno, mfp_density, mfp_Woodcock;
float3 mfp_table_read_a, mfp_table_read_b;
int2 seed;
int index;
int material0, // Current material, starting at 0 for 1st material
material_old; // Flag to mark a material or energy change
signed char scatter_state; // Flag for scatter images: scatter_state=0 for non-scattered, =1 for Compton, =2 for Rayleigh, and =3 for multiple scatter.
// -- Store the Compton table in shared memory from global memory:
// For Compton and Rayleigh the memory accesses are not coherent and caching does not speed them up; it actually slows down access to other data.
#ifdef USING_CUDA
__shared__
#endif
struct compton_struct cgco_SHARED;
#ifdef USING_CUDA
__shared__
#endif
struct detector_struct detector_data_SHARED;
#ifdef USING_CUDA
__shared__
#endif
struct source_struct source_data_SHARED;
#ifdef USING_CUDA
if (0==threadIdx.x) // First GPU thread copies the variables to shared memory
{
#endif
// -Copy the current source, detector data from global to shared memory for fast access:
source_data_SHARED = source_data_array[num_p];
detector_data_SHARED = detector_data_array[num_p]; // Copy the long array to a single instance in shared memory for the current projection
// -Copy the compton data to shared memory:
cgco_SHARED = *compton_table;
#ifdef USING_CUDA
}
__syncthreads(); // Make sure all threads will see the initialized shared variable
#endif
// -- Initialize the RANECU generator in a position far away from the previous history:
#ifdef USING_CUDA
init_PRNG((threadIdx.x + blockIdx.x*blockDim.x), histories_per_thread, seed_input, &seed); // Using a 1D block
#else
init_PRNG(history_batch, histories_per_thread, seed_input, &seed);
#endif
// -- Loop for the "histories_per_thread" particles in the current history_batch:
for( ; histories_per_thread>0; histories_per_thread--)
{
// printf("\n\n********* NEW HISTORY: %d [seeds: %d, %d]\n\n", histories_per_thread, seed.x, seed.y); // fflush(stdout); // !!Verbose!! calling printf from the GPU is possible but if multiple threads call it at the same time some output will be lost.
int absvox = 1;
// -- Call the source function to get a primary x ray:
source(&position, &direction, &energy, &seed, &absvox, &source_data_SHARED, &detector_data_SHARED);
scatter_state = (signed char)0; // Reset previous scatter state: new non-scattered particle loaded
// -- Find the current energy bin by truncation (this could be pre-calculated for a monoenergetic beam):
// The initialization host code made sure that the sampled energy will always be within the tabulated energies (index never negative or too large).
#ifdef USING_CUDA
index = __float2int_rd((energy-mfp_table_data_CONST.e0)*mfp_table_data_CONST.ide); // Using CUDA function to convert float to integer rounding down (towards minus infinite)
#else
index = (int)((energy-mfp_table_data_CONST.e0)*mfp_table_data_CONST.ide + 0.00001f); // Adding EPSILON to truncate to INT towards minus infinite. There may be a small error for energy<=mfp_table_data_CONST.e0 but this case is irrelevant (particles will always have more energy than e0).
#endif
// -- Get the minimum mfp at the current energy using linear interpolation (Woodcock tracking):
{
float2 mfp_Woodcock_read = mfp_Woodcock_table[index]; // Read the 2 parameters for the linear interpolation in a single read from global memory
mfp_Woodcock = mfp_Woodcock_read.x + energy * mfp_Woodcock_read.y; // Interpolated minimum MFP
}
// -- Reset previous material to force a recalculation of the MFPs (negative materials are not allowed in the voxels):
material_old = -1;
// *** X-ray interaction loop:
for(;;)
{
if (absvox<0) // !!DeBuG!! MC-GPU_v1.3 ==> if I move this "if" above the code runs much slower!? Why???
break; // -- Primary particle was not pointing to the voxel region! (but may still be detected after moving in vacuum in a straight line).
// *** Virtual interaction loop: // New loop structure in MC-GPU_v1.3: simulate all virtual events before sampling Compton & Rayleigh: // !!DeBuG!!
float2 matdens;
short3 voxel_coord; // Variable used only by DOSE TALLY
do
{
step = -(mfp_Woodcock)*logf(ranecu(&seed)); // Using the minimum MFP in the geometry for the input energy (Woodcock tracking)
position.x += step*direction.x;
position.y += step*direction.y;
position.z += step*direction.z;
// -- Locate the new particle in the voxel geometry:
absvox = locate_voxel(&position, &voxel_coord); // Get the voxel number at the current position and the voxel coordinates (used to check if inside the dose ROI in DOSE TALLY).
if (absvox<0)
break; // -- Particle escaped the voxel region! ("index" is still >0 at this moment)
matdens = voxel_mat_dens[absvox]; // Get the voxel material and density in a single read from global memory
material0 = (int)(matdens.x - 1); // Set the current material by truncation, and set 1st material to value '0'.
// -- Get the data for the linear interpolation of the interaction MFPs, in case the energy or material have changed:
if (material0 != material_old)
{
mfp_table_read_a = mfp_table_a[index*(MAX_MATERIALS)+material0];
mfp_table_read_b = mfp_table_b[index*(MAX_MATERIALS)+material0];
material_old = material0; // Store the new material
}
// *** Apply Woodcock tracking:
mfp_density = mfp_Woodcock * matdens.y;
// -- Calculate probability of delta scattering, using the total mean free path for the current material and energy (linear interpolation):
prob = 1.0f - mfp_density * (mfp_table_read_a.x + energy * mfp_table_read_b.x);
randno = ranecu(&seed); // Sample uniform PRN
}
while (randno<prob); // [Iterate if there is a delta scattering event]
if (absvox<0)
break; // -- Particle escaped the voxel region! Break the interaction loop to call tally image.
// The GPU threads will be stopped and waiting here until ALL threads have a REAL event:
// -- Real event takes place! Check the kind of event and sample the effects of the interaction:
prob += mfp_density * (mfp_table_read_a.y + energy * mfp_table_read_b.y); // Interpolate total Compton MFP ('y' component)
if (randno<prob) // [Checking Compton scattering]
{
// *** Compton interaction:
// -- Sample new direction and energy:
double costh_Compton;
randno = energy; // Save a temporary copy of the particle energy (variable randno not needed until the next sampling). DOSE TALLY
GCOa(&energy, &costh_Compton, &material0, &seed, &cgco_SHARED);
rotate_double(&direction, costh_Compton, /*phi=2*pi*PRN=*/ 6.28318530717958647693*ranecu_double(&seed));
randno = energy - randno; // Save a temporary copy of the negative of the energy lost in the interaction. DOSE TALLY
// -- Find the new energy interval:
#ifdef USING_CUDA
index = __float2int_rd((energy-mfp_table_data_CONST.e0)*mfp_table_data_CONST.ide); // Using CUDA function to convert float to integer rounding down (towards minus infinity)
#else
index = (int)((energy-mfp_table_data_CONST.e0)*mfp_table_data_CONST.ide + 0.00001f); // Adding EPSILON to truncate to INT
#endif
if (index>-1) // 'index' will be negative only when the energy is below the tabulated minimum energy: particle will be then absorbed (rejected) after tallying the dose.
{
// -- Get the Woodcock MFP for the new energy (energy above minimum cutoff):
float2 mfp_Woodcock_read = mfp_Woodcock_table[index]; // Read the 2 parameters for the linear interpolation in a single read from global memory
mfp_Woodcock = mfp_Woodcock_read.x + energy * mfp_Woodcock_read.y; // Interpolated minimum MFP
material_old = -2; // Set an impossible material to force an update of the MFP data for the next energy interval
// -- Update scatter state:
if (scatter_state==(signed char)0)
scatter_state = (signed char)1; // Set scatter_state == 1: Compton scattered particle
else
scatter_state = (signed char)3; // Set scatter_state == 3: Multi-scattered particle
}
}
else
{
prob += mfp_density * (mfp_table_read_a.z + energy * mfp_table_read_b.z); // Interpolate total Rayleigh MFP ('z' component)
if (randno<prob) // [Checking Rayleigh scattering]
{
// *** Rayleigh interaction:
// -- Sample angular deflection:
double costh_Rayleigh;
float pmax_current = rayleigh_table->pmax[(index+1)*MAX_MATERIALS+material0]; // Maximum cumulative probability of the squared form factor for Rayleigh sampling (value taken from the next energy bin)
GRAa(&energy, &costh_Rayleigh, &material0, &pmax_current, &seed, rayleigh_table);
rotate_double(&direction, costh_Rayleigh, /*phi=2*pi*PRN=*/ 6.28318530717958647693*ranecu_double(&seed));
// -- Update scatter state:
if (scatter_state==(signed char)0)
scatter_state = (signed char)2; // Set scatter_state == 2: Rayleigh scattered particle
else
scatter_state = (signed char)3; // Set scatter_state == 3: Multi-scattered particle
}
else
{
// *** Photoelectric interaction (or pair production): mark particle for absorption after dose tally (ie, index<0)!
randno = -energy; // Save a temporary copy of the (negative) energy deposited in the interaction (variable randno not needed anymore).
index = -11; // A negative "index" marks that the particle was absorbed and that it will never arrive at the detector.
}
}
// -- Tally the dose deposited in Compton and photoelectric interactions:
if (randno<-0.001f)
{
float Edep = -1.0f*randno; // randno temporarily stored the negative of the deposited energy; recover the positive Edep here.
// -- Tally the dose deposited in the current material, if enabled (ie, array allocated and not null):
if (materials_dose!=NULL)
tally_materials_dose(&Edep, &material0, materials_dose); // !!tally_materials_dose!!
// -- Tally the energy deposited in the current voxel, if enabled (tally disabled when dose_ROI_x_max_CONST is negative). DOSE TALLY
if (dose_ROI_x_max_CONST > -1)
tally_voxel_energy_deposition(&Edep, &voxel_coord, voxels_Edep);
}
// -- Break interaction loop for particles that have been absorbed or with energy below the tabulated cutoff: particle is "absorbed" (ie, track discontinued).
if (index<0)
break;
} // [Cycle the X-ray interaction loop]
if (index>-1)
{
// -- Particle escaped the voxels but was not absorbed, check if it will arrive at the detector and tally its energy:
tally_image(&energy, &position, &direction, &scatter_state, image, &source_data_SHARED, &detector_data_SHARED);
}
} // [Continue with a new history]
} // [All tracks simulated for this kernel call: return to CPU]
////////////////////////////////////////////////////////////////////////////////
//! Tally the dose deposited in the voxels.
//! This function is called whenever a particle suffers a Compton or photoelectric
//! interaction. It is not necessary to call this function if the dose tally
//! was disabled in the input file (ie, dose_ROI_x_max_CONST < 0).
//! Electrons are not transported in MC-GPU and therefore we are approximating
//! that the dose is equal to the KERMA (energy released by the photons alone).
//! This approximation is acceptable when there is electronic equilibrium and when
//! the range of the secondary electrons is shorter than the voxel size. Usually the
//! doses will be acceptable for photon energies below 1 MeV. The dose estimates may
//! not be accurate at the interface of low density volumes.
//!
//! We need to use atomicAdd() in the GPU to prevent multiple threads from updating the
//! same voxel at the same time, which would result in a loss of information.
//! This is very improbable when using a large number of voxels but causes trouble
//! with simple geometries with few voxels (in this case the atomicAdd will slow
//! down the code because threads will update the voxel dose sequentially).
//!
//!
//! @param[in] Edep Energy deposited in the interaction
//! @param[in] voxel_coord Voxel coordinates, needed to check if particle located inside the input region of interest (ROI)
//! @param[out] voxels_Edep ulonglong2 array containing the 3D voxel dose and dose^2 (ie, uncertainty) as unsigned integers scaled by SCALE_eV.
////////////////////////////////////////////////////////////////////////////////
#ifdef USING_CUDA
__device__
#endif
inline
void tally_voxel_energy_deposition(float* Edep, short3* voxel_coord, ulonglong2* voxels_Edep)
{
// !!DeBuG!! Maybe it would be faster to store a 6 element struct and save temp copy?? struct_short_int_x6_align16 dose_ROI_size = dose_ROI_size_CONST; // Get ROI coordinates from GPU constant memory and store temporal copy
if((voxel_coord->x < dose_ROI_x_min_CONST) || (voxel_coord->x > dose_ROI_x_max_CONST) ||
(voxel_coord->y < dose_ROI_y_min_CONST) || (voxel_coord->y > dose_ROI_y_max_CONST) ||
(voxel_coord->z < dose_ROI_z_min_CONST) || (voxel_coord->z > dose_ROI_z_max_CONST))
{
return; // -- Particle outside the ROI: return without tallying anything.
}
// -- Particle inside the ROI: tally Edep.
register int DX = 1 + (int)(dose_ROI_x_max_CONST - dose_ROI_x_min_CONST);
register int num_voxel = (int)(voxel_coord->x-dose_ROI_x_min_CONST) + ((int)(voxel_coord->y-dose_ROI_y_min_CONST))*DX + ((int)(voxel_coord->z-dose_ROI_z_min_CONST))*DX*(1 + (int)(dose_ROI_y_max_CONST-dose_ROI_y_min_CONST));
#ifdef USING_CUDA
atomicAdd(&voxels_Edep[num_voxel].x, __float2ull_rn((*Edep)*SCALE_eV) ); // Energy deposited at the voxel, scaled by the factor SCALE_eV and rounded.
atomicAdd(&voxels_Edep[num_voxel].y, __float2ull_rn((*Edep)*(*Edep)) ); // (not using SCALE_eV for std_dev to prevent overflow)
#else
voxels_Edep[num_voxel].x += (unsigned long long int)((*Edep)*SCALE_eV + 0.5f);
voxels_Edep[num_voxel].y += (unsigned long long int)((*Edep)*(*Edep) + 0.5f);
#endif
return;
}
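// --------------------------------------------------------------------------
// Illustrative sketch (not part of the original code; kept inert with "#if 0").
// It shows how the linear index "num_voxel" computed above maps a voxel inside
// the dose ROI to the 1D voxels_Edep array. The helper name and the ROI limits
// used here are hypothetical, chosen only to make the arithmetic concrete.
#if 0
static int roi_linear_index_example(void)
{
  const int x_min = 2, x_max = 11, y_min = 3, y_max = 7, z_min = 1;  // Hypothetical ROI limits
  const int DX = 1 + (x_max - x_min);   // 10 voxels along X inside the ROI
  const int DY = 1 + (y_max - y_min);   // 5 voxels along Y inside the ROI
  // Voxel at (x=5, y=4, z=2), expressed relative to the ROI corner:
  int num_voxel = (5 - x_min) + (4 - y_min)*DX + (2 - z_min)*DX*DY;  // = 3 + 1*10 + 1*50 = 63
  return num_voxel;                     // Index into voxels_Edep for this ROI voxel
}
#endif
// --------------------------------------------------------------------------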
////////////////////////////////////////////////////////////////////////////////
//! Tally a radiographic projection image.
//! This function is called whenever a particle escapes the voxelized volume.
//! The code checks if the particle would arrive at the detector if it kept
//! moving in a straight line after exiting the voxels (assuming vacuum enclosure).
//! An ideal image formation model is implemented: each pixel counts the total energy
//! of the x rays that enter the pixel (100% detection efficiency for any energy).
//! The image due to primaries and different kinds of scatter is tallied separately.
//!
//! In the GPU, an atomicAdd() function is used to make sure that multiple threads do
//! not update the same pixel at the same time, which would result in a loss of information.
//! Since the atomicAdd function is only available for 'unsigned long long int' data,
//! the float pixel values are scaled by a factor "SCALE_eV" defined in the header file
//! (eg, #define SCALE_eV 10000.0f) and stored as unsigned long long integers in main
//! memory.
//!
//! WARNING! If the total tallied signal (for all particles) is larger than "1.8e19/SCALE_eV",
//! the 64-bit counter will overflow and wrap around to 0, giving bogus results.
//!
//! WARNING! The detector plane should be located outside the voxels bounding box. However, since
//! the particles are moved outside the bbox in the last step, they could cross the detector
//! plane anyway. If the particles are less than 2.0 cm behind the detector, they are moved
//! back and detected. Therefore the detector can be a few cm inside the bbox and still work.
//! If the Woodcock mean free path is larger than the distance from the bbox to the detector,
//! we may lose some particles behind the detector!
//!
//!
//! @param[in] energy X-ray energy
//! @param[in] position Particle position
//! @param[in] direction Particle direction (cosine vectors)
//! @param[in] scatter_state Flag marking primaries, single Compton, single Rayleigh or multiple scattered radiation
//! @param[out] image Integer array containing the image, ie, the pixel values (in tenths of meV)
////////////////////////////////////////////////////////////////////////////////
#ifdef USING_CUDA
__device__
#endif
inline void tally_image(float* energy, float3* position, float3* direction, signed char* scatter_state, unsigned long long int* image, struct source_struct* source_data_SHARED, struct detector_struct* detector_data_SHARED)
{
float dist_detector, rotated_position;
if (detector_data_SHARED->rotation_flag == 1) // --> Initial source direction is not (0,1,0): detector has to be rotated to +Y to find the pixel number
{
// *** Skip particles not moving towards the detector.
// (a) Skip particles that were deflected more than 90 deg from the original source direction (backscatter).
// (b) Skip particles located more than 10 cm behind the detector
// (c) Skip particles for which the distance to the detector plane is much larger than the SDD (likely to intersect the plane outside the pixel region).
// !!DeBuG!! NOTE: This may give problems for big detectors very close to the source
// !!DeBuG!! Particles located after the detector will be moved back to the surface of the detector, but 10 cm maximum!!
// In this way the detector can intersect the voxels bbox or be located right on the surface of the bbox: the particles will be
// transported across the detector and until a little after the end of the bbox in the last step, but then moved back.
// This algorithm will give correct results ONLY when the detector intersects just slightly the air space around the phantom,
// so that the interactions after the detector are not significant (this happens sometimes using oblique beams).
// I could remove particles after the detector using "if (dist_detector<0.0f) return;".
// (a) Calculate the angle between the particle and the initial direction (dot product): reject particle if cos_angle < cos(89)==0 (angle>89deg):
// [Extra parenthesis are coded to suggest to the compiler the use of intrinsic multiply-add operations].
register float cos_angle = direction->x * source_data_SHARED->direction.x +
(direction->y * source_data_SHARED->direction.y +
(direction->z * source_data_SHARED->direction.z));
if (cos_angle < 0.025f)
return; // Reject particle: Angle larger than 89 deg --> particle moving parallel to the detector or backwards towards the source!
// (b) Find the distance from the current particle location (likely just after the surface of the voxel bbox) to the intersection with the detector plane:
dist_detector = ( source_data_SHARED->direction.x * (detector_data_SHARED->center.x - position->x) +
(source_data_SHARED->direction.y * (detector_data_SHARED->center.y - position->y) +
(source_data_SHARED->direction.z * (detector_data_SHARED->center.z - position->z))) ) / cos_angle;
// !!DeBuG!! IF's below (used in v1.2) are not needed when checking the x ray angle:
// if (dist_detector < -10.0f) // !!DeBuG!! Is 10 cm enough or too much? Should I use 0? or allow any distance?
// return; // !!DeBuG!! Reject particles located more than 10 cm behind the detector. 10 cm was selected arbitrarily. Woodcock MFP for x-rays in bone: MFP 200 keV photons in bone ==> 4 cm.
//
// if (fabsf(dist_detector)>(2.1f*detector_data_CONST.sdd))
// return; // Reject particle: distance to the detector plane too large, the particle is likely to travel almost parallel to the detector and will not be detected.
// *** Translate the particle to the detector plane (we assume the detector is completely absorbent: 100% detection efficiency):
position->x = position->x + dist_detector * direction->x;
position->y = position->y + dist_detector * direction->y;
position->z = position->z + dist_detector * direction->z;
// *** Rotate the particle position vector to the default reference system where the detector is perpendicular to the +Y axis, then find out if the particle is located inside a pixel:
#ifdef USING_CUDA
rotated_position = detector_data_SHARED->rot_inv[0]*position->x + detector_data_SHARED->rot_inv[1]*position->y + detector_data_SHARED->rot_inv[2]*position->z; // X coordinate
int pixel_coord_x = __float2int_rd((rotated_position - detector_data_SHARED->corner_min_rotated_to_Y.x) * detector_data_SHARED->inv_pixel_size_X); // Using CUDA intrinsic function to convert float to integer rounding down (towards minus infinity)
if ((pixel_coord_x>-1)&&(pixel_coord_x<detector_data_SHARED->num_pixels.x))
{
rotated_position = detector_data_SHARED->rot_inv[6]*position->x + detector_data_SHARED->rot_inv[7]*position->y + detector_data_SHARED->rot_inv[8]*position->z; // Z coordinate
int pixel_coord_z = __float2int_rd((rotated_position - detector_data_SHARED->corner_min_rotated_to_Y.z) * detector_data_SHARED->inv_pixel_size_Z);
if ((pixel_coord_z>-1)&&(pixel_coord_z<detector_data_SHARED->num_pixels.y))
{
// -- Particle enters the detector! Tally the particle energy in the corresponding pixel (in tenths of meV):
// Using a CUDA atomic function (not available for global floats yet) to read and increase the pixel value in a single instruction, blocking interferences from other threads.
// The offset for the primaries or scatter images are calculated considering that:
// scatter_state=0 for non-scattered, =1 for Compton, =2 for Rayleigh, and =3 for multiple scatter.
atomicAdd(( image + // Pointer to beginning of image array
(int)(*scatter_state) * detector_data_SHARED->total_num_pixels + // Offset to corresponding scatter image
(pixel_coord_x + pixel_coord_z*(detector_data_SHARED->num_pixels.x)) ), // Offset to the corresponding pixel
__float2ull_rn((*energy)*SCALE_eV) ); // Energy arriving at the pixel, scaled by the factor SCALE_eV and rounded.
// The maximum unsigned long long int value is ~1.8e19:
}
}
#else
// CPU version (not using CUDA intrinsics: atomicAdd, fast type casting)
rotated_position = detector_data_SHARED->rot_inv[0]*position->x + detector_data_SHARED->rot_inv[1]*position->y + detector_data_SHARED->rot_inv[2]*position->z; // X coordinate
float pixel_coord_x = floor((rotated_position - detector_data_SHARED->corner_min_rotated_to_Y.x)*detector_data_SHARED->inv_pixel_size_X); // Using float+floor instead of INT to avoid truncation errors for positive and negative values
if ( (pixel_coord_x>-0.1f) && (pixel_coord_x<(detector_data_SHARED->num_pixels.x-0.1f)) ) // Rejecting values negative or bigger than the image size
{
rotated_position = detector_data_SHARED->rot_inv[6]*position->x + detector_data_SHARED->rot_inv[7]*position->y + detector_data_SHARED->rot_inv[8]*position->z; // Z coordinate
float pixel_coord_z = floor((rotated_position - detector_data_SHARED->corner_min_rotated_to_Y.z)*detector_data_SHARED->inv_pixel_size_Z);
if ( (pixel_coord_z>-0.1f) && (pixel_coord_z<(detector_data_SHARED->num_pixels.y-0.1f)) )
image[(int)(((float)*scatter_state)*detector_data_SHARED->total_num_pixels + pixel_coord_x + pixel_coord_z*detector_data_SHARED->num_pixels.x + 0.0001f)]
+= (unsigned long long int)((*energy)*SCALE_eV + 0.5f); // Tally the particle energy in the pixel. This instruction is not thread-safe, but it is ok in sequential CPU code.
}
#endif
}
else // (detector_data_SHARED->rotation_flag != 1) --> Initial source direction is (0,1,0): pixel number and distance can be found easily
{
if (direction->y < 0.0001f)
return; // *** Reject particles not moving towards the detector plane at +Y.
dist_detector = (detector_data_SHARED->center.y - position->y)/(direction->y); // Distance to the intersection with the detector at +Y.
// !!DeBuG!! IF below (v1.2) not needed when checking the angle
// if (dist_detector>(2.1f*detector_data_SHARED->sdd)) return;
#ifdef USING_CUDA
int pixel_coord_x = __float2int_rd((position->x + dist_detector*direction->x - detector_data_SHARED->corner_min_rotated_to_Y.x)*detector_data_SHARED->inv_pixel_size_X);
if ((pixel_coord_x>-1)&&(pixel_coord_x<detector_data_SHARED->num_pixels.x))
{
int pixel_coord_z = __float2int_rd((position->z + dist_detector*direction->z - detector_data_SHARED->corner_min_rotated_to_Y.z)*detector_data_SHARED->inv_pixel_size_Z);
if ((pixel_coord_z>-1)&&(pixel_coord_z<detector_data_SHARED->num_pixels.y))
atomicAdd( ( image + // Pointer to beginning of image array
(int)(*scatter_state) * detector_data_SHARED->total_num_pixels + // Offset to corresponding scatter image
(pixel_coord_x + pixel_coord_z*(detector_data_SHARED->num_pixels.x)) ), // Offset to the corresponding pixel
__float2ull_rn((*energy)*SCALE_eV) ); // Energy arriving at the pixel, scaled by the factor SCALE_eV and rounded.
}
#else
// --Calculate the pixel the x ray enters, truncating towards minus infinity and making sure the conversion to int is safe:
float pixel_coord_x = floor((position->x + dist_detector*direction->x - detector_data_SHARED->corner_min_rotated_to_Y.x)*detector_data_SHARED->inv_pixel_size_X);
if ( (pixel_coord_x>-0.1f) && (pixel_coord_x<(detector_data_SHARED->num_pixels.x-0.1f)) )
{
float pixel_coord_z = floor((position->z + dist_detector*direction->z - detector_data_SHARED->corner_min_rotated_to_Y.z)*detector_data_SHARED->inv_pixel_size_Z);
if ( (pixel_coord_z>-0.1f) && (pixel_coord_z<(detector_data_SHARED->num_pixels.y-0.1f)) )
image[(int)(((float)*scatter_state)*detector_data_SHARED->total_num_pixels + pixel_coord_x + pixel_coord_z*detector_data_SHARED->num_pixels.x + 0.0001f)]
+= (unsigned long long int)((*energy)*SCALE_eV + 0.5f); // Truncate the pixel number to INT and round the energy value
}
#endif
}
}
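// --------------------------------------------------------------------------
// Illustrative sketch (not part of the original code; kept inert with "#if 0").
// It shows how a tallied pixel counter could be converted back to energy in eV
// on the host, and where the 64-bit counter wraps. The helper name and the
// SCALE_eV value (10000.0, as suggested in the comments above) are assumptions.
#if 0
static double pixel_counter_to_eV_example(unsigned long long int pixel_counter)
{
  const double SCALE_eV_assumed = 10000.0;                        // Hypothetical value of SCALE_eV
  double energy_eV = ((double)pixel_counter) / SCALE_eV_assumed;  // Undo the scaling applied in tally_image
  // The unsigned 64-bit counter wraps near 1.8e19 counts, ie, at about
  // 1.8e19/SCALE_eV = 1.8e15 eV of total tallied signal per pixel.
  return energy_eV;
}
#endif
// --------------------------------------------------------------------------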
////////////////////////////////////////////////////////////////////////////////
//! Source that creates primary x rays, according to the defined source model.
//! The particles are automatically moved to the surface of the voxel bounding box,
//! to start the tracking inside a real material. If the sampled particle does not
//! enter the voxels, it is initialized at the focal spot and the main program will
//! check whether it arrives at the detector or not.
//!
//! @param[in] source_data Structure describing the source.
//! @param[in] source_energy_data_CONST Global variable in constant memory space describing the source energy spectrum.
//! @param[out] position Initial particle position (particle transported inside the voxel bbox).
//! @param[out] direction Sampled particle direction (cosine vectors).
//! @param[out] energy Sampled energy of the new x ray.
//! @param[in] seed Current seed of the random number generator, required to sample the movement direction.
//! @param[out] absvox Set to <0 if primary particle will not cross the voxels, not changed otherwise (>0).
////////////////////////////////////////////////////////////////////////////////
#ifdef USING_CUDA
__device__
#endif
inline void source(float3* position, float3* direction, float* energy, int2* seed, int* absvox, struct source_struct* source_data_SHARED, struct detector_struct* detector_data_SHARED)
{
// *** Sample the initial x-ray energy following the input energy spectrum using the Walker aliasing algorithm from PENELOPE:
// The following code is equivalent to calling the function "seeki_walker": int sampled_bin = seeki_walker(source_data_CONST.espc_cutoff, source_data_CONST.espc_alias, ranecu(seed), source_data_CONST.num_bins_espc);
int sampled_bin;
float RN = ranecu(seed) * source_energy_data_CONST.num_bins_espc; // Find initial interval (array starting at 0):
#ifdef USING_CUDA
int int_part = __float2int_rd(RN); // -- Integer part (round down)
#else
int int_part = (int)(RN);
#endif
float fraction_part = RN - ((float)int_part); // -- Fractional part
if (fraction_part < source_energy_data_CONST.espc_cutoff[int_part]) // Check if we are in the aliased part
sampled_bin = int_part; // Below the cutoff: return current value
else
sampled_bin = (int)source_energy_data_CONST.espc_alias[int_part]; // Above the cutoff: return alias
// Linear interpolation of the final energy within the sampled energy bin:
*energy = source_energy_data_CONST.espc[sampled_bin] + ranecu(seed) * (source_energy_data_CONST.espc[sampled_bin+1] - source_energy_data_CONST.espc[sampled_bin]);
// *** Sample the initial direction:
do // Iterate sampling if the sampled direction is not acceptable to get a square field at the given phi (rejection sampling): force square field for any phi!!
{
// Using the algorithm used in PENMAIN.f, from penelope 2008 (by F. Salvat).
direction->z = source_data_SHARED->cos_theta_low + ranecu(seed)*source_data_SHARED->D_cos_theta; // direction->z = w = cos(theta_sampled)
register float phi_sampled = source_data_SHARED->phi_low + ranecu(seed)*source_data_SHARED->D_phi;
register float sin_theta_sampled = sqrtf(1.0f - direction->z*direction->z);
float sinphi_sampled, cosphi_sampled;
#ifdef USING_CUDA
sincos(phi_sampled, &sinphi_sampled,&cosphi_sampled); // Calculate the SIN and COS at the same time.
#else
sinphi_sampled = sin(phi_sampled); // Some CPU compilers will be able to use "sincos", but let's be safe.
cosphi_sampled = cos(phi_sampled);
#endif
direction->y = sin_theta_sampled * sinphi_sampled;
direction->x = sin_theta_sampled * cosphi_sampled;
}
while( fabsf(direction->z/(direction->y+1.0e-7f)) > source_data_SHARED->max_height_at_y1cm ); // !!DeBuG!! Force square field for any phi by rejection sampling!! Is it necessary to use the "+1.0e-7f" to prevent possible division by zero???
if (detector_data_SHARED->rotation_flag == 1)
{
// --Initial beam not pointing to (0,1,0), apply rotation:
register float direction_x_tmp = direction->x;
register float direction_y_tmp = direction->y;
direction->x = source_data_SHARED->rot_fan[0]*direction_x_tmp + source_data_SHARED->rot_fan[1]*direction_y_tmp + source_data_SHARED->rot_fan[2]*direction->z;
direction->y = source_data_SHARED->rot_fan[3]*direction_x_tmp + source_data_SHARED->rot_fan[4]*direction_y_tmp + source_data_SHARED->rot_fan[5]*direction->z;
direction->z = source_data_SHARED->rot_fan[6]*direction_x_tmp + source_data_SHARED->rot_fan[7]*direction_y_tmp + source_data_SHARED->rot_fan[8]*direction->z;
}
// Initialize x ray position at focal spot before translation into bbox. Particle stays in focal spot if no interaction found:
position->x = source_data_SHARED->position.x;
position->y = source_data_SHARED->position.y;
position->z = source_data_SHARED->position.z;
move_to_bbox(position, direction, voxel_data_CONST.size_bbox, absvox); // Move the particle inside the voxel bounding box.
}
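// --------------------------------------------------------------------------
// Illustrative sketch of the Walker aliasing step used above (not part of the
// original code; kept inert with "#if 0"). The helper name, the cutoff/alias
// table pointers and the uniform random value are hypothetical stand-ins for
// source_energy_data_CONST and ranecu(seed).
#if 0
static int walker_alias_sample_example(const float* espc_cutoff, const short* espc_alias,
                                       int num_bins_espc, float uniform_0_1)
{
  float RN = uniform_0_1 * num_bins_espc;     // Scale the PRN to the number of spectrum bins
  int   int_part = (int)RN;                   // Candidate bin (round down)
  float fraction_part = RN - (float)int_part; // Fractional part decides between bin and alias
  // Keep the candidate bin if the fraction falls below its cutoff, otherwise jump to its alias:
  return (fraction_part < espc_cutoff[int_part]) ? int_part : (int)espc_alias[int_part];
}
#endif
// --------------------------------------------------------------------------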
////////////////////////////////////////////////////////////////////////////////
//! Functions that moves a particle inside the voxelized geometry bounding box.
//! An EPSILON distance is added to make sure the particles will be clearly inside the bbox,
//! not exactly on the surface.
//!
//! This algorithm makes the following assumptions:
//! - The back lower vertex of the voxel bounding box is always located at the origin: (x0,y0,z0)=(0,0,0).
//! - The initial value of "position" corresponds to the focal spot location.
//! - When a ray is not pointing towards the bbox plane that it should cross according to the sign of the direction,
//! I assign a distance to the intersection =0 instead of the real negative distance. The wall that will be
//! crossed to enter the bbox is always the furthest and therefore a 0 distance will never be used except
//! in the case of a ray starting inside the bbox or outside the bbox and not pointing to any of the 3 planes.
//! In this situation the ray will be transported a 0 distance, meaning that it will stay at the focal spot.
//!
//! (Interesting information on ray-box intersection: http://tog.acm.org/resources/GraphicsGems/gems/RayBox.c)
//!
//! @param[in,out] position Particle position: initially set to the focal spot, returned transported inside the voxel bbox.
//! @param[in] direction Particle direction (cosine vectors).
//! @param[out] intersection_flag Set to <0 if particle outside bbox and will not cross the voxels, not changed otherwise.
//! @param[in] size_bbox Size of the bounding box.
////////////////////////////////////////////////////////////////////////////////
#ifdef USING_CUDA
__device__
#endif
inline void move_to_bbox(float3* position, float3* direction, float3 size_bbox, int* intersection_flag)
{
float dist_y, dist_x, dist_z;
// -Distance to the nearest Y plane:
if ((direction->y) > EPS_SOURCE) // Moving to +Y: check distance to y=0 plane
{
// Check Y=0 (bbox wall):
if (position->y > 0.0f) // The input position must correspond to the focal spot => position->y == source_data_CONST.position[*num_p].y
dist_y = 0.0f; // No intersection with this plane: particle inside or past the box
// The actual distance would be negative but we set it to 0 because we will not move the particle if no intersection exists.
else
dist_y = EPS_SOURCE + (-position->y)/(direction->y); // dist_y > 0 for sure in this case
}
else if ((direction->y) < NEG_EPS_SOURCE)
{
// Check Y=voxel_data_CONST.size_bbox.y:
if (position->y < size_bbox.y)
dist_y = 0.0f; // No intersection with this plane
else
dist_y = EPS_SOURCE + (size_bbox.y - position->y)/(direction->y); // dist_y > 0 for sure in this case
}
else // (direction->y)~0
dist_y = NEG_INF; // Particle moving parallel to the plane: no interaction possible (set impossible negative dist = -INFINITE)
// -Distance to the nearest X plane:
if ((direction->x) > EPS_SOURCE)
{
// Check X=0:
if (position->x > 0.0f)
dist_x = 0.0f;
else
dist_x = EPS_SOURCE + (-position->x)/(direction->x); // dist_x > 0 for sure in this case
}
else if ((direction->x) < NEG_EPS_SOURCE)
{
// Check X=voxel_data_CONST.size_bbox.x:
if (position->x < size_bbox.x)
dist_x = 0.0f;
else
dist_x = EPS_SOURCE + (size_bbox.x - position->x)/(direction->x); // dist_x > 0 for sure in this case
}
else
dist_x = NEG_INF;
// -Distance to the nearest Z plane:
if ((direction->z) > EPS_SOURCE)
{
// Check Z=0:
if (position->z > 0.0f)
dist_z = 0.0f;
else
dist_z = EPS_SOURCE + (-position->z)/(direction->z); // dist_z > 0 for sure in this case
}
else if ((direction->z) < NEG_EPS_SOURCE)
{
// Check Z=voxel_data_CONST.size_bbox.z:
if (position->z < size_bbox.z)
dist_z = 0.0f;
else
dist_z = EPS_SOURCE + (size_bbox.z - position->z)/(direction->z); // dist_z > 0 for sure in this case
}
else
dist_z = NEG_INF;
// -- Find the longest distance plane, which is the one that has to be crossed to enter the bbox.
// Storing the maximum distance in variable "dist_z". Distance will be =0 if no intersection exists or
// if the x ray is already inside the bbox.
if ( (dist_y>dist_x) && (dist_y>dist_z) )
dist_z = dist_y; // dist_z == dist_max
else if (dist_x>dist_z)
dist_z = dist_x;
// else
// dist_max = dist_z;
// -- Move particle from the focal spot (current location) to the bbox wall surface (slightly inside):
position->x += dist_z * direction->x;
position->y += dist_z * direction->y;
position->z += dist_z * direction->z;
// Check if the new position is outside the bbox. If true, the particle must be moved back to the focal spot location:
if ( (position->x < 0.0f) || (position->x > size_bbox.x) ||
(position->y < 0.0f) || (position->y > size_bbox.y) ||
(position->z < 0.0f) || (position->z > size_bbox.z) )
{
position->x -= dist_z * direction->x; // Reject new position undoing the previous translation
position->y -= dist_z * direction->y;
position->z -= dist_z * direction->z;
(*intersection_flag) = -111; // Particle outside the bbox AND not pointing to the bbox: set absvox<0 to skip interaction sampling.
}
}
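// --------------------------------------------------------------------------
// Illustrative sketch of the "enter through the furthest plane" rule used above
// (not part of the original code; kept inert with "#if 0"). The helper name and
// the per-axis entry distances are hypothetical: planes already behind the ray
// get a forced distance of 0, so the maximum of the three values is the step
// that moves the particle onto the bbox wall it actually has to cross.
#if 0
static float bbox_entry_distance_example(void)
{
  float dist_x = 2.0f;   // Hypothetical distance to reach the X entry wall
  float dist_y = 0.0f;   // Already past the Y entry wall -> distance forced to 0
  float dist_z = 0.0f;   // Already past the Z entry wall -> distance forced to 0
  float dist_max = dist_x;              // Take the maximum of the three distances...
  if (dist_y > dist_max) dist_max = dist_y;
  if (dist_z > dist_max) dist_max = dist_z;
  return dist_max;       // ...which is the distance travelled to enter the bbox (0 if already inside)
}
#endif
// --------------------------------------------------------------------------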
////////////////////////////////////////////////////////////////////////////////
//! Upper limit of the number of random values sampled in a single track.
#define LEAP_DISTANCE 256
//! Multipliers and moduli for the two MLCG in RANECU.
#define a1_RANECU 40014
#define m1_RANECU 2147483563
#define a2_RANECU 40692
#define m2_RANECU 2147483399
////////////////////////////////////////////////////////////////////////////////
//! Initialize the pseudo-random number generator (PRNG) RANECU to a position
//! far away from the previous history (leap frog technique).
//!
//! Each calculated seed initiates a consecutive and disjoint sequence of
//! pseudo-random numbers with length LEAP_DISTANCE, that can be used
//! in a parallel simulation (Sequence Splitting parallelization method).
//! The basic equation behind the algorithm is:
//! S(i+j) = (a**j * S(i)) MOD m = [(a**j MOD m)*S(i)] MOD m ,
//! which is described in:
//! P L'Ecuyer, Commun. ACM 31 (1988) p.742
//!
//! This function has been adapted from "seedsMLCG.f", see:
//! A Badal and J Sempau, Computer Physics Communications 175 (2006) p. 440-450
//!
//! @param[in] history Particle batch number.
//! @param[in] seed_input Initial PRNG seed input (used to initiate both MLCGs in RANECU).
//! @param[out] seed Initial PRNG seeds for the present history.
//!
////////////////////////////////////////////////////////////////////////////////
#ifdef USING_CUDA
__device__
#endif
inline void init_PRNG(int history_batch, int histories_per_thread, int seed_input, int2* seed)
{
// -- Move the RANECU generator to a unique position for the current batch of histories:
// I have to use an "unsigned long long int" value to represent all the simulated histories in all previous batches
// The maximum unsigned long long int value is ~1.8e19: if history >1.8e16 and LEAP_DISTANCE==1000, 'leap' will overflow.
// **** 1st MLCG:
unsigned long long int leap = ((unsigned long long int)(history_batch+1))*(histories_per_thread*LEAP_DISTANCE);
int y = 1;
int z = a1_RANECU;
// -- Calculate the modulo power '(a^leap)MOD(m)' using a divide-and-conquer algorithm adapted to modulo arithmetic
for(;;)
{
// (A2) Halve n, and store the integer part and the residue
if (0!=(leap&01)) // (bit-wise operation for MOD(leap,2), or leap%2 ==> proceed if leap is an odd number) Equivalent: t=(short)(leap%2);
{
leap >>= 1; // Halve n moving the bits 1 position right. Equivalent to: leap=(leap/2);
y = abMODm(m1_RANECU,z,y); // (A3) Multiply y by z: y = [z*y] MOD m
if (0==leap) break; // (A4) leap==0? ==> finish
}
else // (leap is even)
{
leap>>= 1; // Halve leap moving the bits 1 position right. Equivalent to: leap=(leap/2);
}
z = abMODm(m1_RANECU,z,z); // (A5) Square z: z = [z*z] MOD m
}
// AjMODm1 = y; // Exponentiation finished: AjMODm = expMOD = y = a^j
// -- Compute and display the seeds S(i+j), from the present seed S(i), using the previously calculated value of (a^j)MOD(m):
// S(i+j) = [(a**j MOD m)*S(i)] MOD m
// S_i = abMODm(m,S_i,AjMODm)
seed->x = abMODm(m1_RANECU, seed_input, y); // Using the input seed as the starting seed
// **** 2nd MLCG (repeating the previous calculation for the 2nd MLCG parameters):
leap = ((unsigned long long int)(history_batch+1))*(histories_per_thread*LEAP_DISTANCE);
y = 1;
z = a2_RANECU;
for(;;)
{
// (A2) Halve n, and store the integer part and the residue
if (0!=(leap&01)) // (bit-wise operation for MOD(leap,2), or leap%2 ==> proceed if leap is an odd number) Equivalent: t=(short)(leap%2);
{
leap >>= 1; // Halve n moving the bits 1 position right. Equivalent to: leap=(leap/2);
y = abMODm(m2_RANECU,z,y); // (A3) Multiply y by z: y = [z*y] MOD m
if (0==leap) break; // (A4) leap==0? ==> finish
}
else // (leap is even)
{
leap>>= 1; // Halve leap moving the bits 1 position right. Equivalent to: leap=(leap/2);
}
z = abMODm(m2_RANECU,z,z); // (A5) Square z: z = [z*z] MOD m
}
// AjMODm2 = y;
seed->y = abMODm(m2_RANECU, seed_input, y); // Using the input seed as the starting seed
}
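// --------------------------------------------------------------------------
// Illustrative sketch of the leap-frog relation used above (not part of the
// original code; kept inert with "#if 0"). The helper name is hypothetical; it
// advances the 1st MLCG one step at a time as a slow reference, which should
// reproduce seed->x computed by init_PRNG for the same inputs, since
// S(i+j) = [(a^j MOD m)*S(i)] MOD m.
#if 0
static int leapfrog_seed_reference_example(int seed_input, int history_batch, int histories_per_thread)
{
  unsigned long long int leap = ((unsigned long long int)(history_batch+1))*(histories_per_thread*LEAP_DISTANCE);
  long long s = seed_input;
  for (unsigned long long int k = 0; k < leap; k++)
    s = ((long long)a1_RANECU * s) % (long long)m1_RANECU;   // One MLCG step: S(i+1) = [a*S(i)] MOD m
  return (int)s;   // Expected to match the seed->x produced by init_PRNG (1st MLCG only)
}
#endif
// --------------------------------------------------------------------------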
/////////////////////////////////////////////////////////////////////
//! Calculate "(a1*a2) MOD m" with 32-bit integers and avoiding
//! the possible overflow, using the Russian Peasant approach
//! modulo m and the approximate factoring method, as described
//! in: L'Ecuyer and Cote, ACM Trans. Math. Soft. 17 (1991).
//!
//! This function has been adapted from "seedsMLCG.f", see:
//! Badal and Sempau, Computer Physics Communications 175 (2006)
//!
//! @param[in] m,a,s MLCG parameters
//! @return (a1*a2) MOD m
//
// Input: 0 < a1 < m
// 0 < a2 < m
//
// Return value: (a1*a2) MOD m
//
/////////////////////////////////////////////////////////////////////
#ifdef USING_CUDA
__device__ __host__ // Function will be callable from host and also from device
#endif
inline int abMODm(int m, int a, int s)
{
// CAUTION: the input parameters are modified in the function but should not be returned to the calling function! (pass by value!)
int q, k;
int p = -m; // p is always negative to avoid overflow when adding
// ** Apply the Russian peasant method until "a =< 32768":
while (a>32768) // We assume '32' bit integers (4 bytes): 2^(('32'-2)/2) = 32768
{
if (0!=(a&1)) // Store 's' when 'a' is odd Equivalent code: if (1==(a%2))
{
p += s;
if (p>0) p -= m;
}
a >>= 1; // Half a (move bits 1 position right) Equivalent code: a = a/2;
s = (s-m) + s; // Double s (MOD m)
if (s<0) s += m; // (s is always positive)
}
// ** Employ the approximate factoring method (a is small enough to avoid overflow):
q = (int) m / a;
k = (int) s / q;
s = a*(s-k*q)-k*(m-q*a);
while (s<0)
s += m;
// ** Compute the final result:
p += s;
if (p<0) p += m;
return p;
}
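// --------------------------------------------------------------------------
// Illustrative sketch (not part of the original code; kept inert with "#if 0").
// The Russian peasant + approximate factoring result can be cross-checked on
// the host against a direct 64-bit product, since (a*s) MOD m fits comfortably
// in 64-bit arithmetic. The helper name is hypothetical.
#if 0
static int abMODm_check_example(int m, int a, int s)
{
  long long direct = ((long long)a * (long long)s) % (long long)m;  // Reference value using 64-bit arithmetic
  int fast = abMODm(m, a, s);                                       // Overflow-safe 32-bit version above
  return (fast == (int)direct) ? 1 : 0;                             // 1 if both agree (expected for 0<a,s<m)
}
#endif
// --------------------------------------------------------------------------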
////////////////////////////////////////////////////////////////////////////////
//! Pseudo-random number generator (PRNG) RANECU returning a float value
//! (single precision version).
//!
//! @param[in,out] seed PRNG seed (seed kept in the calling function and updated here).
//! @return PRN float value in the open interval (0,1)
//!
////////////////////////////////////////////////////////////////////////////////
#ifdef USING_CUDA
__device__
#endif
inline float ranecu(int2* seed)
{
int i1 = (int)(seed->x/53668);
seed->x = 40014*(seed->x-i1*53668)-i1*12211;
int i2 = (int)(seed->y/52774);
seed->y = 40692*(seed->y-i2*52774)-i2*3791;
if (seed->x < 0) seed->x += 2147483563;
if (seed->y < 0) seed->y += 2147483399;
i2 = seed->x-seed->y;
if (i2 < 1) i2 += 2147483562;
#ifdef USING_CUDA
return (__int2float_rn(i2)*4.65661305739e-10f); // 4.65661305739e-10 == 1/2147483563
#else
return ((float)(i2)*4.65661305739e-10f);
#endif
}
////////////////////////////////////////////////////////////////////////////////
//! Pseudo-random number generator (PRNG) RANECU returning a double value.
////////////////////////////////////////////////////////////////////////////////
#ifdef USING_CUDA
__device__
#endif
inline double ranecu_double(int2* seed)
{
int i1 = (int)(seed->x/53668);
seed->x = 40014*(seed->x-i1*53668)-i1*12211;
int i2 = (int)(seed->y/52774);
seed->y = 40692*(seed->y-i2*52774)-i2*3791;
if (seed->x < 0) seed->x += 2147483563;
if (seed->y < 0) seed->y += 2147483399;
i2 = seed->x-seed->y;
if (i2 < 1) i2 += 2147483562;
#ifdef USING_CUDA
return (__int2double_rn(i2)*4.6566130573917692e-10);
#else
return ((double)(i2)*4.6566130573917692e-10);
#endif
}
////////////////////////////////////////////////////////////////////////////////
//! Find the voxel that contains the current position.
//! Report the voxel absolute index and the x,y,z indices.
//! The structure containing the voxel number and size is read from CONSTANT memory.
//!
//! @param[in] position Particle position
//! @param[out] voxel_coord Pointer to three integer values (short3*) that will store the x,y and z voxel indices.
//! @return Returns "absvox", the voxel number where the particle is
//! located (negative if position outside the voxel bbox).
//!
////////////////////////////////////////////////////////////////////////////////
#ifdef USING_CUDA
__device__
#endif
inline int locate_voxel(float3* position, short3* voxel_coord)
{
if ( (position->y < EPS_SOURCE) || (position->y > (voxel_data_CONST.size_bbox.y - EPS_SOURCE)) ||
(position->x < EPS_SOURCE) || (position->x > (voxel_data_CONST.size_bbox.x - EPS_SOURCE)) ||
(position->z < EPS_SOURCE) || (position->z > (voxel_data_CONST.size_bbox.z - EPS_SOURCE)) )
{
// -- Particle escaped the voxelized geometry (using EPS_SOURCE to avoid numerical precision errors):
return -1;
}
// -- Particle inside the voxelized geometry, find current voxel:
// The truncation from float to integer could cause problems for negative coordinates but this will never happen thanks to the IF at the beginning of this function.
// (no need to use the CUDA function to convert float to integer rounding down (towards minus infinity): __float2int_rd)
register int voxel_coord_x, voxel_coord_y, voxel_coord_z;
#ifdef USING_CUDA
voxel_coord_x = __float2int_rd(position->x * voxel_data_CONST.inv_voxel_size.x);
voxel_coord_y = __float2int_rd(position->y * voxel_data_CONST.inv_voxel_size.y);
voxel_coord_z = __float2int_rd(position->z * voxel_data_CONST.inv_voxel_size.z);
#else
voxel_coord_x = (int)(position->x * voxel_data_CONST.inv_voxel_size.x);
voxel_coord_y = (int)(position->y * voxel_data_CONST.inv_voxel_size.y);
voxel_coord_z = (int)(position->z * voxel_data_CONST.inv_voxel_size.z);
#endif
// Output the voxel coordinates as short int (2 bytes) instead of int (4 bytes) to save registers; avoid type castings in the calculation of the return value.
voxel_coord->x = (short int) voxel_coord_x;
voxel_coord->y = (short int) voxel_coord_y;
voxel_coord->z = (short int) voxel_coord_z;
return (voxel_coord_x + voxel_coord_y*(voxel_data_CONST.num_voxels.x) + voxel_coord_z*(voxel_data_CONST.num_voxels.x)*(voxel_data_CONST.num_voxels.y));
}
//////////////////////////////////////////////////////////////////////
//! Rotates a vector; the rotation is specified by giving
//! the polar and azimuthal angles in the "self-frame", as
//! determined by the vector to be rotated.
//! This function is a literal translation from Fortran to C of
//! PENELOPE (v. 2006) subroutine "DIRECT".
//!
//! @param[in,out] (u,v,w) input vector (=d) in the lab. frame; returns the rotated vector components in the lab. frame
//! @param[in] costh cos(theta), angle between d before and after turn
//! @param[in] phi azimuthal angle (rad) turned by d in its self-frame
//
// Output:
// (u,v,w) -> rotated vector components in the lab. frame
//
// Comments:
// -> (u,v,w) should have norm=1 on input; if not, it is
// renormalized on output, provided norm>0.
// -> The algorithm is based on considering the turned vector
// d' expressed in the self-frame S',
// d' = (sin(th)cos(ph), sin(th)sin(ph), cos(th))
// and then apply a change of frame from S' to the lab
// frame. S' is defined as having its z' axis coincident
// with d, its y' axis perpendicular to z and z' and its
// x' axis equal to y'*z'. The matrix of the change is then
// / uv/rho -v/rho u \
// S ->lab: | vw/rho u/rho v | , rho=(u^2+v^2)^0.5
// \ -rho 0 w /
// -> When rho=0 (w=1 or -1) z and z' are parallel and the y'
// axis cannot be defined in this way. Instead y' is set to
// y and therefore either x'=x (if w=1) or x'=-x (w=-1)
//////////////////////////////////////////////////////////////////////
#ifdef USING_CUDA
__device__
#endif
inline void rotate_double(float3* direction, double costh, double phi) // !!DeBuG!! The direction vector is single precision but the rotation is performed in double precision for increased accuracy.
{
double DXY, NORM, cosphi, sinphi, SDT;
DXY = direction->x*direction->x + direction->y*direction->y;
#ifdef USING_CUDA
sincos(phi, &sinphi,&cosphi); // Calculate the SIN and COS at the same time.
#else
sinphi = sin(phi); // Some CPU compilers will be able to use "sincos", but let's be safe.
cosphi = cos(phi);
#endif
// **** Ensure normalisation
NORM = DXY + direction->z*direction->z; // !!DeBuG!! Check if it is really necessary to renormalize in a real simulation!!
if (fabs(NORM-1.0)>1.0e-14)
{
NORM = 1.0/sqrt(NORM);
direction->x = NORM*direction->x;
direction->y = NORM*direction->y;
direction->z = NORM*direction->z;
DXY = direction->x*direction->x + direction->y*direction->y;
}
if (DXY>1.0e-28)
{
SDT = sqrt((1.0-costh*costh)/DXY);
float direction_x_in = direction->x;
direction->x = direction->x*costh + SDT*(direction_x_in*direction->z*cosphi-direction->y*sinphi);
direction->y = direction->y*costh+SDT*(direction->y*direction->z*cosphi+direction_x_in*sinphi);
direction->z = direction->z*costh-DXY*SDT*cosphi;
}
else
{
SDT = sqrt(1.0-costh*costh);
direction->y = SDT*sinphi;
if (direction->z>0.0)
{
direction->x = SDT*cosphi;
direction->z = costh;
}
else
{
direction->x =-SDT*cosphi;
direction->z =-costh;
}
}
}
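// --------------------------------------------------------------------------
// Illustrative sketch (not part of the original code; kept inert with "#if 0").
// After rotate_double the direction should remain a unit vector and the dot
// product between the old and new direction should equal costh, for any phi.
// The helper name and the test values are hypothetical; the direct call to
// rotate_double assumes the CPU build (the function is __device__ under CUDA).
#if 0
static void rotate_double_check_example(void)
{
  float3 d_old = {0.0f, 0.70710678f, 0.70710678f};   // Unit vector before the rotation
  float3 d_new = d_old;
  double costh = 0.9;                                // Cosine of the sampled polar deflection
  rotate_double(&d_new, costh, 1.2345);              // Arbitrary azimuthal angle phi
  double norm      = d_new.x*d_new.x + d_new.y*d_new.y + d_new.z*d_new.z;      // Expected ~1.0
  double cos_angle = d_old.x*d_new.x + d_old.y*d_new.y + d_old.z*d_new.z;      // Expected ~costh
  (void)norm; (void)cos_angle;
}
#endif
// --------------------------------------------------------------------------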
//////////////////////////////////////////////////////////////////////
// ***********************************************************************
// * Translation of PENELOPE's "SUBROUTINE GRAa" from FORTRAN77 to C *
// ***********************************************************************
//! Sample a Rayleigh interaction using the sampling algorithm
//! used in PENELOPE 2006.
//!
//! @param[in] energy Particle energy (not modified with Rayleigh)
//! @param[out] costh_Rayleigh Cosine of the angular deflection
//! @param[in] material Current voxel material
//
// CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
// C PENELOPE/PENGEOM (version 2006) C
// C Copyright (c) 2001-2006 C
// C Universitat de Barcelona C
// C Permission to use, copy, modify, distribute and sell this software C
// C and its documentation for any purpose is hereby granted without C
// C fee, provided that the above copyright notice appears in all C
// C copies and that both that copyright notice and this permission C
// C notice appear in all supporting documentation. The Universitat de C
// C Barcelona makes no representations about the suitability of this C
// C software for any purpose. It is provided "as is" without express C
// C or implied warranty. C
// CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
//////////////////////////////////////////////////////////////////////
#ifdef USING_CUDA
__device__
#endif
inline void GRAa(float *energy, double *costh_Rayleigh, int *mat, float *pmax_current, int2 *seed, struct rayleigh_struct* cgra)
{
/* **** Energy grid and interpolation constants for the current energy. */
double xmax = ((double)*energy) * 8.065535669099010e-5; // 8.065535669099010e-5 == 2.0*20.6074/510998.918
double x2max = min_value( (xmax*xmax) , ((double)cgra->xco[(*mat+1)*NP_RAYLEIGH - 1]) ); // Get the last tabulated value of xco for this mat
if (xmax < 0.01)
{
do
{
*costh_Rayleigh = 1.0 - ranecu_double(seed) * 2.0;
}
while ( ranecu_double(seed) > (((*costh_Rayleigh)*(*costh_Rayleigh)+1.0)*0.5) );
return;
}
for(;;) // (Loop will iterate every time the sampled value is rejected or above the maximum)
{
double ru = ranecu_double(seed) * (double)(*pmax_current); // Pmax for the current energy is entered as a parameter
/* **** Selection of the interval (binary search within pre-calculated limits). */
int itn = (int)(ru * (NP_RAYLEIGH-1)); // 'itn' will never reach the last interval 'NP_RAYLEIGH-1', but this is how RITA is implemented in PENELOPE
int i__ = (int)cgra->itlco[itn + (*mat)*NP_RAYLEIGH];
int j = (int)cgra->ituco[itn + (*mat)*NP_RAYLEIGH];
if ((j - i__) > 1)
{
do
{
register int k = (i__ + j)>>1; // >>1 == /2
if (ru > cgra->pco[k -1 + (*mat)*NP_RAYLEIGH])
i__ = k;
else
j = k;
}
while ((j - i__) > 1);
}
/* **** Sampling from the rational inverse cumulative distribution. */
int index = i__ - 1 + (*mat)*NP_RAYLEIGH;
double rr = ru - cgra->pco[index];
double xx;
if (rr > 1e-16)
{
double d__ = (double)(cgra->pco[index+1] - cgra->pco[index]);
float aco_index = cgra->aco[index], bco_index = cgra->bco[index], xco_index = cgra->xco[index]; // Avoid multiple accesses to the same global variable
xx = (double)xco_index + (double)(aco_index + 1.0f + bco_index)* d__* rr / (d__*d__ + (aco_index*d__ + bco_index*rr) * rr) * (double)(cgra->xco[index+1] - xco_index);
}
else
{
xx = cgra->xco[index];
}
if (xx < x2max)
{
// Sampled value below maximum possible value:
*costh_Rayleigh = 1.0 - 2.0 * xx / x2max; // !!DeBuG!! costh_Rayleigh in double precision, but not all intermediate steps are!?
/* **** Rejection: */
if (ranecu_double(seed) < (((*costh_Rayleigh)*(*costh_Rayleigh) + 1.0)*0.5))
break; // Sample value not rejected! break loop and return.
}
}
} /* graa */
//////////////////////////////////////////////////////////////////////////
// ***********************************************************************
// * Translation of PENELOPE's "SUBROUTINE GCOa" from FORTRAN77 to C *
// ********************************************************************* *
//! Random sampling of incoherent (Compton) scattering of photons, using
//! the sampling algorithm from PENELOPE 2006:
//! Relativistic impulse approximation with analytical one-electron Compton profiles
// !!DeBuG!! In penelope, Doppler broadening is not used for E greater than 5 MeV.
// We don't use it in GPU to reduce the lines of code and prevent using COMMON/compos/ZT(M)
//! @param[in,out] energy incident and final photon energy (eV)
//! @param[out] costh_Compton cosine of the polar scattering angle
//! @param[in] material Current voxel material
//! @param[in] seed RANECU PRNG seed
//
// CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
// C PENELOPE/PENGEOM (version 2006) C
// C Copyright (c) 2001-2006 C
// C Universitat de Barcelona C
// C Permission to use, copy, modify, distribute and sell this software C
// C and its documentation for any purpose is hereby granted without C
// C fee, provided that the above copyright notice appears in all C
// C copies and that both that copyright notice and this permission C
// C notice appear in all supporting documentation. The Universitat de C
// C Barcelona makes no representations about the suitability of this C
// C software for any purpose. It is provided "as is" without express C
// C or implied warranty. C
// CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
//
// ************************************************************************
#ifdef USING_CUDA
__device__
#endif
inline void GCOa(float *energy, double *costh_Compton, int *mat, int2 *seed, struct compton_struct* cgco_SHARED)
{
float s, a1, s0, af, ek, ek2, ek3, tau, pzomc, taumin;
float rn[MAX_SHELLS];
double cdt1;
// Some variables used in PENELOPE have been eliminated to save register: float aux, taum2, fpzmax, a, a2, ek1 ,rni, xqc, fpz, pac[MAX_SHELLS];
int i__;
int my_noscco = cgco_SHARED->noscco[*mat]; // Store the number of oscillators for the input material in a local variable
#ifndef USING_CUDA
static int warning_flag_1 = -1, warning_flag_2 = -1, warning_flag_3 = -1; // Write warnings for the CPU code, but only once. !!DeBuG!!
#endif
ek = *energy * 1.956951306108245e-6f; // (1.956951306108245e-6 == 1.0/510998.918)
ek2 = ek * 2.f + 1.f;
ek3 = ek * ek;
// ek1 = ek3 - ek2 - 1.;
taumin = 1.f / ek2;
// taum2 = taumin * taumin;
a1 = logf(ek2);
// a2 = a1 + ek * 2. * (ek + 1.) * taum2; // a2 was used only once, code moved below
/* **** Incoherent scattering function for theta=PI. */
s0 = 0.0f;
for (i__ = 0; i__ < my_noscco; i__++)
{
register float temp = cgco_SHARED->uico[*mat + i__*MAX_MATERIALS];
if (temp < *energy)
{
register float aux = *energy * (*energy - temp) * 2.f;
#ifdef USING_CUDA
pzomc = cgco_SHARED->fj0[*mat + i__*MAX_MATERIALS] * (aux - temp * 510998.918f) * rsqrtf(aux + aux + temp * temp) * 1.956951306108245e-6f;
// 1.956951306108245e-6 = 1.0/510998.918f // Version using the reciprocal of sqrt in CUDA: faster and more accurate!!
#else
pzomc = cgco_SHARED->fj0[*mat + i__*MAX_MATERIALS] * (aux - temp * 510998.918f) / (sqrtf(aux + aux + temp * temp) * 510998.918f);
#endif
if (pzomc > 0.0f)
temp = (0.707106781186545f+pzomc*1.4142135623731f) * (0.707106781186545f+pzomc*1.4142135623731f);
else
temp = (0.707106781186545f-pzomc*1.4142135623731f) * (0.707106781186545f-pzomc*1.4142135623731f);
temp = 0.5f * expf(0.5f - temp); // Calculate EXP outside the IF to avoid branching
if (pzomc > 0.0f)
temp = 1.0f - temp;
s0 += cgco_SHARED->fco[*mat + i__*MAX_MATERIALS] * temp;
}
}
/* **** Sampling tau. */
do
{
if (ranecu(seed)*/*a2=*/(a1+2.*ek*(ek+1.f)*taumin*taumin) < a1)
{
tau = powf(taumin, ranecu(seed)); // !!DeBuG!! "powf()" has a big error (7 ULP), the double version has only 2!!
}
else
{
tau = sqrtf(1.f + ranecu(seed) * (taumin * taumin - 1.f));
}
cdt1 = (double)(1.f-tau) / (((double)tau)*((double)*energy)*1.956951306108245e-6); // !!DeBuG!! The sampled COS will be double precision, but TAU is not!!!
if (cdt1 > 2.0) cdt1 = 1.99999999; // !!DeBuG!! Make sure that precision error in POW, SQRT never gives cdt1>2 ==> costh_Compton<-1
/* **** Incoherent scattering function. */
s = 0.0f;
for (i__ = 0; i__ < my_noscco; i__++)
{
register float temp = cgco_SHARED->uico[*mat + i__*MAX_MATERIALS];
if (temp < *energy)
{
register float aux = (*energy) * (*energy - temp) * ((float)cdt1);
if ((aux>1.0e-12f)||(temp>1.0e-12f)) // !!DeBuG!! Make sure the SQRT argument is never <0, and that we never get 0/0 -> NaN when aux=temp=0 !!
{
#ifdef USING_CUDA
pzomc = cgco_SHARED->fj0[*mat + i__*MAX_MATERIALS] * (aux - temp * 510998.918f) * rsqrtf(aux + aux + temp * temp) * 1.956951306108245e-6f;
// 1.956951306108245e-6 = 1.0/510998.918f // Version using the reciprocal of sqrt in CUDA: faster and more accurate!!
#else
pzomc = cgco_SHARED->fj0[*mat + i__*MAX_MATERIALS] * (aux - temp * 510998.918f) / (sqrtf(aux + aux + temp * temp) * 510998.918f);
#endif
}
else
{
pzomc = 0.002f; // !!DeBuG!! Using a rough approximation to a sample value of pzomc found using pure double precision: NOT RIGOROUS! But this code is expected to be used very seldom, only in extreme cases.
#ifndef USING_CUDA
if (warning_flag_1<0)
{
warning_flag_1 = +1; // Disable warning, do not show again
printf(" [... Small numerical precision error detected computing \"pzomc\" in GCOa (this warning will not be repeated).]\n i__=%d, aux=%.14f, temp=%.14f, pzomc(forced)=%.14f, uico=%.14f, energy=%.7f, cgco_SHARED->fj0=%.14f, mat=%d, cdt1=%.14lf\n", (int)i__, aux, temp, pzomc, cgco_SHARED->uico[*mat+i__*MAX_MATERIALS], *energy, cgco_SHARED->fj0[*mat+i__*MAX_MATERIALS], (int)*mat, cdt1); // !!DeBuG!!
}
#endif
}
temp = pzomc * 1.4142135623731f;
if (pzomc > 0.0f)
temp = 0.5f - (temp + 0.70710678118654502f) * (temp + 0.70710678118654502f); // Calculate exponential argument
else
temp = 0.5f - (0.70710678118654502f - temp) * (0.70710678118654502f - temp);
temp = 0.5f * expf(temp); // All threads will calculate the expf together
if (pzomc > 0.0f)
temp = 1.0f - temp;
s += cgco_SHARED->fco[*mat + i__*MAX_MATERIALS] * temp;
rn[i__] = temp;
}
}
} while( (ranecu(seed)*s0) > (s*(1.0f+tau*(/*ek1=*/(ek3 - ek2 - 1.0f)+tau*(ek2+tau*ek3)))/(ek3*tau*(tau*tau+1.0f))) ); // **** Rejection function
*costh_Compton = 1.0 - cdt1;
/* **** Target electron shell. */
for (;;)
{
register float temp = s*ranecu(seed);
float pac = 0.0f;
int ishell = my_noscco - 1; // First shell will have number 0
for (i__ = 0; i__ < (my_noscco-1); i__++) // !!DeBuG!! Iterate to (my_noscco-1) only: the last oscillator is excited in case all other fail (no point in double checking) ??
{
pac += cgco_SHARED->fco[*mat + i__*MAX_MATERIALS] * rn[i__]; // !!DeBuG!! pac[] is calculated on the fly to save registers!
if (pac > temp) // pac[] is calculated on the fly to save registers!
{
ishell = i__;
break;
}
}
/* **** Projected momentum of the target electron. */
temp = ranecu(seed) * rn[ishell];
if (temp < 0.5f)
{
pzomc = (0.70710678118654502f - sqrtf(0.5f - logf(temp + temp))) / (cgco_SHARED->fj0[*mat + ishell * MAX_MATERIALS] * 1.4142135623731f);
}
else
{
pzomc = (sqrtf(0.5f - logf(2.0f - 2.0f*temp)) - 0.70710678118654502f) / (cgco_SHARED->fj0[*mat + ishell * MAX_MATERIALS] * 1.4142135623731f);
}
if (pzomc < -1.0f)
{
continue; // re-start the loop
}
/* **** F(EP) rejection. */
temp = tau * (tau - (*costh_Compton) * 2.f) + 1.f; // this variable was originally called "xqc"
// af = sqrt( max_value(temp,1.0e-30f) ) * (tau * (tau - *costh_Compton) / max_value(temp,1.0e-30f) + 1.f); //!!DeBuG!! Make sure the SQRT argument is never <0, and that I don't divide by zero!!
if (temp>1.0e-20f) // !!DeBuG!! Make sure the SQRT argument is never <0, and that I don't divide by zero!!
{
af = sqrtf(temp) * (tau * (tau - ((float)(*costh_Compton))) / temp + 1.f);
}
else
{
// When using single precision, it is possible (but very uncommon) to get costh_Compton==1 and tau==1; then temp is 0 and 'af' can not be calculated (0/0 -> nan). Analysing the results obtained using double precision, we found that 'af' would be almost 0 in this situation, with an "average" about ~0.002 (this is just a rough estimation, but using af=0 the value would never be rejected below).
af = 0.00200f; // !!DeBuG!!
#ifndef USING_CUDA
if (warning_flag_2<0)
{
warning_flag_2 = +1; // Disable warning, do not show again
printf(" [... Small numerical precision error detected computing \"af\" in GCOa (this warning will not be repeated)].\n xqc=%.14f, af(forced)=%.14f, tau=%.14f, costh_Compton=%.14lf\n", temp, af, tau, *costh_Compton); // !!DeBuG!!
}
#endif
}
if (af > 0.0f)
{
temp = af * 0.2f + 1.f; // this variable was originally called "fpzmax"
}
else
{
temp = 1.f - af * 0.2f;
}
if ( ranecu(seed)*temp < /*fpz =*/(af * max_value( min_value(pzomc,0.2f) , -0.2f ) + 1.f) )
{
break;
}
}
/* **** Energy of the scattered photon. */
{
register float t, b1, b2, temp;
t = pzomc * pzomc;
b1 = 1.f - t * tau * tau;
b2 = 1.f - t * tau * ((float)(*costh_Compton));
temp = sqrtf( fabsf(b2 * b2 - b1 * (1.0f - t)) );
if (pzomc < 0.0f)
temp *= -1.0f;
// !Error! energy may increase (slightly) due to inaccurate calculation! !!DeBuG!!
t = (tau / b1) * (b2 + temp);
if (t > 1.0f)
{
#ifndef USING_CUDA
if (warning_flag_3<0)
{
warning_flag_3 = +1; // Disable warning, do not show again
printf("\n [... a Compton event tried to increase the x ray energy due to precision error. Keeping initial energy. (This warning will not be repeated.)]\n scaling=%.14f, costh_Compton=%.14lf\n", t, *costh_Compton); // !!DeBuG!!
}
#endif
t = 1.0f; // !!DeBuG!! Avoid increasing energy by hand!!! not nice!!
}
(*energy) *= t;
// (*energy) *= (tau / b1) * (b2 + temp); // Original PENELOPE code
}
} // [End subroutine GCOa]
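// -- Illustrative usage (not part of the original code): GCOa is called from the tracking kernel
//    to sample the Compton energy loss and polar deflection; the deflection is then applied with
//    rotate_double using a uniform azimuthal angle. A minimal sketch, assuming 'energy',
//    'direction', 'material0', 'seed' and 'cgco_SHARED' hold the current track state:
//
//      double costh_Compton;
//      GCOa(&energy, &costh_Compton, &material0, &seed, &cgco_SHARED);
//      rotate_double(&direction, costh_Compton, 6.28318530717958647693*ranecu_double(&seed));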
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//! Tally the energy deposited inside each material.
//! This function is called whenever a particle suffers a Compton or photoelectric
//! interaction. The energy released in each interaction is added and later in the
//! report function the total deposited energy is divided by the total mass of the
//! material in the voxelized object to get the dose. This naturally accounts for
//! multiple densities for voxels with the same material (not all voxels have the same mass).
//! Electrons are not transported in MC-GPU and therefore we are approximating
//! that the dose is equal to the KERMA (energy released by the photons alone).
//! This approximation is acceptable when there is electronic equilibrium and
//! when the range of the secondary electrons is shorter than the organ size.
//!
//! The function uses atomic functions for a thread-safe access to the GPU memory.
//! We can check if this tally was disabled in the input file by checking if the array
//! materials_dose was allocated in the GPU (disabled if pointer = NULL).
//!
//!
//! @param[in] Edep Energy deposited in the interaction
//! @param[in] material Current material id number
//! @param[out] materials_dose ulonglong2 array storing the materials dose [in eV/g] and dose^2 (ie, uncertainty).
////////////////////////////////////////////////////////////////////////////////
#ifdef USING_CUDA
__device__
#endif
inline
void tally_materials_dose(float* Edep, int* material, ulonglong2* materials_dose)
{
// !!DeBuG!! The energy can be tallied directly with atomicAdd in global memory or using shared memory first and then global for whole block if too slow. With the initial testing it looks like using global memory is already very fast!
// !!DeBuG!! WARNING: with many histories and few materials the materials_dose integer variables may overflow!! Using double precision floats would be better. Single precision is not good enough because adding small energies to a large counter would give problems.
#ifdef USING_CUDA
atomicAdd(&materials_dose[*material].x, __float2ull_rn((*Edep)*SCALE_eV) ); // Energy deposited at the material, scaled by the factor SCALE_eV and rounded.
atomicAdd(&materials_dose[*material].y, __float2ull_rn((*Edep)*(*Edep)) ); // Square of the dose to estimate standard deviation (not using SCALE_eV for std_dev to prevent overflow)
#else
materials_dose[*material].x += (unsigned long long int)((*Edep)*SCALE_eV + 0.5f);
materials_dose[*material].y += (unsigned long long int)((*Edep)*(*Edep) + 0.5f);
#endif
return;
}
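// -- Illustrative post-processing (not part of the original code): a sketch of how the two 64-bit
//    counters could be converted back to dose in the report stage, assuming the total mass of the
//    material ('mass_g', in grams) is known from the voxelized geometry:
//
//      double Edep_eV       = ((double)materials_dose[mat].x) / SCALE_eV;   // Undo the integer scaling
//      double dose_eV_per_g = Edep_eV / mass_g;                             // Dose approximated by KERMA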
| e09ed747a56bba44128c9ac65086371a9aa6b2bd.cu |
////////////////////////////////////////////////////////////////////////////////
//
// ****************************
// *** MC-GPU , version 1.3 ***
// ****************************
//
//! Definition of the CUDA GPU kernel for the simulation of x ray tracks in a voxelized geometry.
//! This kernel has been optimized to yield a good performance in the GPU but can still be
//! compiled in the CPU without problems. All the CUDA-specific commands are enclosed in
//! pre-processor directives that are skipped if the parameter "USING_CUDA" is not defined
//! at compilation time.
//
// ** DISCLAIMER **
//
// This software and documentation (the "Software") were developed at the Food and
// Drug Administration (FDA) by employees of the Federal Government in the course
// of their official duties. Pursuant to Title 17, Section 105 of the United States
// Code, this work is not subject to copyright protection and is in the public
// domain. Permission is hereby granted, free of charge, to any person obtaining a
// copy of the Software, to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish, distribute,
// sublicense, or sell copies of the Software or derivatives, and to permit persons
// to whom the Software is furnished to do so. FDA assumes no responsibility
// whatsoever for use by other parties of the Software, its source code,
// documentation or compiled executables, and makes no guarantees, expressed or
// implied, about its quality, reliability, or any other characteristic. Further,
// use of this code in no way implies endorsement by the FDA or confers any
// advantage in regulatory decisions. Although this software can be redistributed
// and/or modified freely, we ask that any derivative works bear some notice that
// they are derived from it, and any modified versions bear some notice that they
// have been modified.
//
//
//! @file MC-GPU_kernel_v1.3.cu
//! @author Andreu Badal ([email protected])
//! @date 2012/12/12
// -- Original code started on: 2009/04/14
//
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//! Initialize the image array, ie, set all pixels to zero
//! Essentially, this function has the same effect as the command:
//! "cutilSafeCall(cudaMemcpy(image_device, image, image_bytes, cudaMemcpyHostToDevice))";
//!
//! CUDA performs some initialization work the first time a GPU kernel is called.
//! Therefore, calling a short kernel before the real particle tracking is performed
//! may improve the accuracy of the timing measurements in the relevant kernel.
//!
//! @param[in,out] image Pointer to the image array.
//! @param[in] pixels_per_image Number of pixels in the image (ie, elements in the array).
////////////////////////////////////////////////////////////////////////////////
#ifdef USING_CUDA
__global__
void init_image_array_GPU(unsigned long long int* image, int pixels_per_image)
{
int my_pixel = threadIdx.x + blockIdx.x*blockDim.x;
if (my_pixel < pixels_per_image)
{
// -- Set the current pixel to 0 and return, avoiding overflow when more threads than pixels are used:
image[my_pixel] = (unsigned long long int)(0); // Initialize non-scatter image
my_pixel += pixels_per_image; // (advance to next image)
image[my_pixel] = (unsigned long long int)(0); // Initialize Compton image
my_pixel += pixels_per_image; // (advance to next image)
image[my_pixel] = (unsigned long long int)(0); // Initialize Rayleigh image
my_pixel += pixels_per_image; // (advance to next image)
image[my_pixel] = (unsigned long long int)(0); // Initialize multi-scatter image
}
}
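// -- Illustrative launch (not part of the original code): a minimal sketch of how the host code
//    might call this initialization kernel; 'image_device' and the block size are assumptions:
//
//      int threads_per_block = 128;
//      int num_blocks = (pixels_per_image + threads_per_block - 1) / threads_per_block;
//      init_image_array_GPU<<<num_blocks, threads_per_block>>>(image_device, pixels_per_image);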
// ////////////////////////////////////////////////////////////////////////////////
// //! Initialize the dose deposition array, ie, set all voxel doses to zero
// //!
// //! @param[in,out] dose Pointer to the dose mean and sigma arrays.
// //! @param[in] num_voxels_dose Number of voxels in the dose ROI (ie, elements in the arrays).
// ////////////////////////////////////////////////////////////////////////////////
// __global__
// void init_dose_array_GPU(ulonglong2* voxels_Edep, int num_voxels_dose)
// {
// int my_voxel = threadIdx.x + blockIdx.x*blockDim.x;
// register ulonglong2 ulonglong2_zero;
// ulonglong2_zero.x = ulonglong2_zero.y = (unsigned long long int) 0;
// if (my_voxel < num_voxels_dose)
// {
// voxels_Edep[my_voxel] = ulonglong2_zero; // Set the current voxel to (0,0) and return, avoiding overflow
// }
// }
#endif
////////////////////////////////////////////////////////////////////////////////
//! Main function to simulate x-ray tracks inside a voxelized geometry.
//! Secondary electrons are not simulated (in photoelectric and Compton
//! events the energy is locally deposited).
//!
//! The following global variables, in the GPU __constant__ memory are used:
//! voxel_data_CONST,
//! source_energy_data_CONST,
//! detector_data_CONST,
//! mfp_table_data_CONST.
//!
//! @param[in] history_batch Particle batch number (only used in the CPU version when CUDA is disabled; the GPU uses the built-in variable threadIdx).
//! @param[in] num_p Projection number in the CT simulation. This variable defines a specific angle and the corresponding source and detector will be used.
//! @param[in] histories_per_thread Number of histories to simulate for each call to this function (ie, for GPU thread).
//! @param[in] seed_input Random number generator seed (the same seed is used to initialize the two MLCGs of RANECU).
//! @param[in] voxel_mat_dens Pointer to the voxel densities and material vector (the voxelized geometry), stored in GPU global memory.
//! @param[in] mfp_Woodcock_table Two parameter table for the linear interpolation of the Woodcock mean free path (MFP) (stored in GPU global memory).
//! @param[in] mfp_table_a First element for the linear interpolation of the interaction mean free paths (stored in GPU global memory).
//! @param[in] mfp_table_b Second element for the linear interpolation of the interaction mean free paths (stored in GPU global memory).
//! @param[in] rayleigh_table Pointer to the table with the data required by the Rayleigh interaction sampling, stored in GPU global memory.
//! @param[in] compton_table Pointer to the table with the data required by the Compton interaction sampling, stored in GPU global memory.
//! @param[in,out] image Pointer to the image vector in the GPU global memory.
//! @param[in,out] voxels_Edep Pointer to the array containing the 3D voxel dose (and its uncertainty) in the GPU global memory.
////////////////////////////////////////////////////////////////////////////////
#ifdef USING_CUDA
__global__ void track_particles(int histories_per_thread,
int num_p, // For a CT simulation: allocate space for up to MAX_NUM_PROJECTIONS projections.
int seed_input,
unsigned long long int* image,
ulonglong2* voxels_Edep,
float2* voxel_mat_dens,
float2* mfp_Woodcock_table,
float3* mfp_table_a,
float3* mfp_table_b,
struct rayleigh_struct* rayleigh_table,
struct compton_struct* compton_table,
struct detector_struct* detector_data_array,
struct source_struct* source_data_array,
ulonglong2* materials_dose)
#else
void track_particles(int history_batch, // This variable is not required in the GPU, it uses the thread ID
int histories_per_thread,
int num_p,
int seed_input,
unsigned long long int* image,
ulonglong2* voxels_Edep,
float2* voxel_mat_dens,
float2* mfp_Woodcock_table,
float3* mfp_table_a,
float3* mfp_table_b,
struct rayleigh_struct* rayleigh_table,
struct compton_struct* compton_table,
struct detector_struct* detector_data_array,
struct source_struct* source_data_array,
ulonglong2* materials_dose)
#endif
{
// -- Declare the track state variables:
float3 position, direction;
float energy, step, prob, randno, mfp_density, mfp_Woodcock;
float3 mfp_table_read_a, mfp_table_read_b;
int2 seed;
int index;
int material0, // Current material, starting at 0 for 1st material
material_old; // Flag to mark a material or energy change
signed char scatter_state; // Flag for scatter images: scatter_state=0 for non-scattered, =1 for Compton, =2 for Rayleigh, and =3 for multiple scatter.
// -- Store the Compton table in shared memory from global memory:
// For Compton and Rayleigh the access to memory is not coherent and the caching capability does not speed up the accesses; it actually slows down the access to other data.
#ifdef USING_CUDA
__shared__
#endif
struct compton_struct cgco_SHARED;
#ifdef USING_CUDA
__shared__
#endif
struct detector_struct detector_data_SHARED;
#ifdef USING_CUDA
__shared__
#endif
struct source_struct source_data_SHARED;
#ifdef USING_CUDA
if (0==threadIdx.x) // First GPU thread copies the variables to shared memory
{
#endif
// -Copy the current source, detector data from global to shared memory for fast access:
source_data_SHARED = source_data_array[num_p];
detector_data_SHARED = detector_data_array[num_p]; // Copy the long array to a single instance in shared memory for the current projection
// -Copy the compton data to shared memory:
cgco_SHARED = *compton_table;
#ifdef USING_CUDA
}
__syncthreads(); // Make sure all threads will see the initialized shared variable
#endif
// -- Initialize the RANECU generator in a position far away from the previous history:
#ifdef USING_CUDA
init_PRNG((threadIdx.x + blockIdx.x*blockDim.x), histories_per_thread, seed_input, &seed); // Using a 1D block
#else
init_PRNG(history_batch, histories_per_thread, seed_input, &seed);
#endif
// -- Loop for the "histories_per_thread" particles in the current history_batch:
for( ; histories_per_thread>0; histories_per_thread--)
{
// printf("\n\n********* NEW HISTORY: %d [seeds: %d, %d]\n\n", histories_per_thread, seed.x, seed.y); // fflush(stdout); // !!Verbose!! calling printf from the GPU is possible but if multiple threads call it at the same time some output will be lost.
int absvox = 1;
// -- Call the source function to get a primary x ray:
source(&position, &direction, &energy, &seed, &absvox, &source_data_SHARED, &detector_data_SHARED);
scatter_state = (signed char)0; // Reset previous scatter state: new non-scattered particle loaded
// -- Find the current energy bin by truncation (this could be pre-calculated for a monoenergetic beam):
// The initialization host code made sure that the sampled energy will always be within the tabulated energies (index never negative or too large).
#ifdef USING_CUDA
index = __float2int_rd((energy-mfp_table_data_CONST.e0)*mfp_table_data_CONST.ide); // Using CUDA function to convert float to integer rounding down (towards minus infinite)
#else
index = (int)((energy-mfp_table_data_CONST.e0)*mfp_table_data_CONST.ide + 0.00001f); // Adding EPSILON to truncate to INT towards minus infinite. There may be a small error for energy<=mfp_table_data_CONST.e0 but this case is irrelevant (particles will always have more energy than e0).
#endif
// -- Get the minimum mfp at the current energy using linear interpolation (Woodcock tracking):
{
float2 mfp_Woodcock_read = mfp_Woodcock_table[index]; // Read the 2 parameters for the linear interpolation in a single read from global memory
mfp_Woodcock = mfp_Woodcock_read.x + energy * mfp_Woodcock_read.y; // Interpolated minimum MFP
}
// -- Reset previous material to force a recalculation of the MFPs (negative materials are not allowed in the voxels):
material_old = -1;
// *** X-ray interaction loop:
for(;;)
{
if (absvox<0) // !!DeBuG!! MC-GPU_v1.3 ==> if I move this "if" above the code runs much slower!? Why???
break; // -- Primary particle was not pointing to the voxel region! (but may still be detected after moving in vacuum in a straight line).
// *** Virtual interaction loop: // New loop structure in MC-GPU_v1.3: simulate all virtual events before sampling Compton & Rayleigh: // !!DeBuG!!
float2 matdens;
short3 voxel_coord; // Variable used only by DOSE TALLY
do
{
step = -(mfp_Woodcock)*logf(ranecu(&seed)); // Using the minimum MFP in the geometry for the input energy (Woodcock tracking)
position.x += step*direction.x;
position.y += step*direction.y;
position.z += step*direction.z;
// -- Locate the new particle in the voxel geometry:
absvox = locate_voxel(&position, &voxel_coord); // Get the voxel number at the current position and the voxel coordinates (used to check if inside the dose ROI in DOSE TALLY).
if (absvox<0)
break; // -- Particle escaped the voxel region! ("index" is still >0 at this moment)
matdens = voxel_mat_dens[absvox]; // Get the voxel material and density in a single read from global memory
material0 = (int)(matdens.x - 1); // Set the current material by truncation, and set 1st material to value '0'.
// -- Get the data for the linear interpolation of the interaction MFPs, in case the energy or material have changed:
if (material0 != material_old)
{
mfp_table_read_a = mfp_table_a[index*(MAX_MATERIALS)+material0];
mfp_table_read_b = mfp_table_b[index*(MAX_MATERIALS)+material0];
material_old = material0; // Store the new material
}
// *** Apply Woodcock tracking:
mfp_density = mfp_Woodcock * matdens.y;
// -- Calculate probability of delta scattering, using the total mean free path for the current material and energy (linear interpolation):
prob = 1.0f - mfp_density * (mfp_table_read_a.x + energy * mfp_table_read_b.x);
randno = ranecu(&seed); // Sample uniform PRN
}
while (randno<prob); // [Iterate if there is a delta scattering event]
if (absvox<0)
break; // -- Particle escaped the voxel region! Break the interaction loop to call tally image.
// The GPU threads will be stopped and waiting here until ALL threads have a REAL event:
// -- Real event takes place! Check the kind of event and sample the effects of the interaction:
prob += mfp_density * (mfp_table_read_a.y + energy * mfp_table_read_b.y); // Interpolate total Compton MFP ('y' component)
if (randno<prob) // [Checking Compton scattering]
{
// *** Compton interaction:
// -- Sample new direction and energy:
double costh_Compton;
randno = energy; // Save temporal copy of the particle energy (variable randno not necessary until next sampling). DOSE TALLY
GCOa(&energy, &costh_Compton, &material0, &seed, &cgco_SHARED);
rotate_double(&direction, costh_Compton, /*phi=2*pi*PRN=*/ 6.28318530717958647693*ranecu_double(&seed));
randno = energy - randno; // Save temporal copy of the negative of the energy lost in the interaction. DOSE TALLY
// -- Find the new energy interval:
#ifdef USING_CUDA
index = __float2int_rd((energy-mfp_table_data_CONST.e0)*mfp_table_data_CONST.ide); // Using CUDA function to convert float to integer rounding down (towards minus infinite)
#else
index = (int)((energy-mfp_table_data_CONST.e0)*mfp_table_data_CONST.ide + 0.00001f); // Adding EPSILON to truncate to INT
#endif
if (index>-1) // 'index' will be negative only when the energy is below the tabulated minimum energy: particle will be then absorbed (rejected) after tallying the dose.
{
// -- Get the Woodcock MFP for the new energy (energy above minimum cutoff):
float2 mfp_Woodcock_read = mfp_Woodcock_table[index]; // Read the 2 parameters for the linear interpolation in a single read from global memory
mfp_Woodcock = mfp_Woodcock_read.x + energy * mfp_Woodcock_read.y; // Interpolated minimum MFP
material_old = -2; // Set an impossible material to force an update of the MFPs data for the next energy interval
// -- Update scatter state:
if (scatter_state==(signed char)0)
scatter_state = (signed char)1; // Set scatter_state == 1: Compton scattered particle
else
scatter_state = (signed char)3; // Set scatter_state == 3: Multi-scattered particle
}
}
else
{
prob += mfp_density * (mfp_table_read_a.z + energy * mfp_table_read_b.z); // Interpolate total Rayleigh MFP ('z' component)
if (randno<prob) // [Checking Rayleigh scattering]
{
// *** Rayleigh interaction:
// -- Sample angular deflection:
double costh_Rayleigh;
float pmax_current = rayleigh_table->pmax[(index+1)*MAX_MATERIALS+material0]; // Get max (ie, value for next bin?) cumul prob square form factor for Rayleigh sampling
GRAa(&energy, &costh_Rayleigh, &material0, &pmax_current, &seed, rayleigh_table);
rotate_double(&direction, costh_Rayleigh, /*phi=2*pi*PRN=*/ 6.28318530717958647693*ranecu_double(&seed));
// -- Update scatter state:
if (scatter_state==(signed char)0)
scatter_state = (signed char)2; // Set scatter_state == 2: Rayleigh scattered particle
else
scatter_state = (signed char)3; // Set scatter_state == 3: Multi-scattered particle
}
else
{
// *** Photoelectric interaction (or pair production): mark particle for absorption after dose tally (ie, index<0)!
randno = -energy; // Save temporal copy of the (negative) energy deposited in the interaction (variable randno not necessary anymore).
index = -11; // A negative "index" marks that the particle was absorbed and that it will never arrive at the detector.
}
}
// -- Tally the dose deposited in Compton and photoelectric interactions:
if (randno<-0.001f)
{
float Edep = -1.0f*randno; // If any energy was deposited, this variable will temporarily store the negative value of Edep.
// -- Tally the dose deposited in the current material, if enabled (ie, array allocated and not null):
if (materials_dose!=NULL)
tally_materials_dose(&Edep, &material0, materials_dose); // !!tally_materials_dose!!
// -- Tally the energy deposited in the current voxel, if enabled (tally disabled when dose_ROI_x_max_CONST is negative). DOSE TALLY
if (dose_ROI_x_max_CONST > -1)
tally_voxel_energy_deposition(&Edep, &voxel_coord, voxels_Edep);
}
// -- Break interaction loop for particles that have been absorbed or with energy below the tabulated cutoff: particle is "absorbed" (ie, track discontinued).
if (index<0)
break;
} // [Cycle the X-ray interaction loop]
if (index>-1)
{
// -- Particle escaped the voxels but was not absorbed, check if it will arrive at the detector and tally its energy:
tally_image(&energy, &position, &direction, &scatter_state, image, &source_data_SHARED, &detector_data_SHARED);
}
} // [Continue with a new history]
} // [All tracks simulated for this kernel call: return to CPU]
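// -- Illustrative launch (not part of the original code): a minimal sketch of how the host side
//    might invoke the tracking kernel with a 1D grid; all the "_device" pointer names and the
//    grid dimensions are assumptions, not the actual variable names used in the main program:
//
//      track_particles<<<num_blocks, threads_per_block>>>(histories_per_thread, num_p, seed_input,
//              image_device, voxels_Edep_device, voxel_mat_dens_device, mfp_Woodcock_table_device,
//              mfp_table_a_device, mfp_table_b_device, rayleigh_table_device, compton_table_device,
//              detector_data_device, source_data_device, materials_dose_device);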
////////////////////////////////////////////////////////////////////////////////
//! Tally the dose deposited in the voxels.
//! This function is called whenever a particle suffers a Compton or photoelectric
//! interaction. It is not necessary to call this function if the dose tally
//! was disabled in the input file (ie, dose_ROI_x_max_CONST < 0).
//! Electrons are not transported in MC-GPU and therefore we are approximating
//! that the dose is equal to the KERMA (energy released by the photons alone).
//! This approximation is acceptable when there is electronic equilibrium and when
//! the range of the secondary electrons is shorter than the voxel size. Usually the
//! doses will be acceptable for photon energies below 1 MeV. The dose estimates may
//! not be accurate at the interface of low density volumes.
//!
//! We need to use atomicAdd() in the GPU to prevent multiple threads from updating the
//! same voxel at the same time, which would result in a loss of information.
//! This is very improbable when using a large number of voxels but causes trouble
//! with simple geometries with few voxels (in this case the atomicAdd will slow
//! down the code because threads will update the voxel dose sequentially).
//!
//!
//! @param[in] Edep Energy deposited in the interaction
//! @param[in] voxel_coord Voxel coordinates, needed to check if particle located inside the input region of interest (ROI)
//! @param[out] voxels_Edep ulonglong2 array containing the 3D voxel dose and dose^2 (ie, uncertainty) as unsigned integers scaled by SCALE_eV.
////////////////////////////////////////////////////////////////////////////////
#ifdef USING_CUDA
__device__
#endif
inline
void tally_voxel_energy_deposition(float* Edep, short3* voxel_coord, ulonglong2* voxels_Edep)
{
// !!DeBuG!! Maybe it would be faster to store a 6 element struct and save temp copy?? struct_short_int_x6_align16 dose_ROI_size = dose_ROI_size_CONST; // Get ROI coordinates from GPU constant memory and store temporal copy
if((voxel_coord->x < dose_ROI_x_min_CONST) || (voxel_coord->x > dose_ROI_x_max_CONST) ||
(voxel_coord->y < dose_ROI_y_min_CONST) || (voxel_coord->y > dose_ROI_y_max_CONST) ||
(voxel_coord->z < dose_ROI_z_min_CONST) || (voxel_coord->z > dose_ROI_z_max_CONST))
{
return; // -- Particle outside the ROI: return without tallying anything.
}
// -- Particle inside the ROI: tally Edep.
register int DX = 1 + (int)(dose_ROI_x_max_CONST - dose_ROI_x_min_CONST);
register int num_voxel = (int)(voxel_coord->x-dose_ROI_x_min_CONST) + ((int)(voxel_coord->y-dose_ROI_y_min_CONST))*DX + ((int)(voxel_coord->z-dose_ROI_z_min_CONST))*DX*(1 + (int)(dose_ROI_y_max_CONST-dose_ROI_y_min_CONST));
#ifdef USING_CUDA
atomicAdd(&voxels_Edep[num_voxel].x, __float2ull_rn((*Edep)*SCALE_eV) ); // Energy deposited at the voxel, scaled by the factor SCALE_eV and rounded.
atomicAdd(&voxels_Edep[num_voxel].y, __float2ull_rn((*Edep)*(*Edep)) ); // (not using SCALE_eV for std_dev to prevent overflow)
#else
voxels_Edep[num_voxel].x += (unsigned long long int)((*Edep)*SCALE_eV + 0.5f);
voxels_Edep[num_voxel].y += (unsigned long long int)((*Edep)*(*Edep) + 0.5f);
#endif
return;
}
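// -- Illustrative post-processing (not part of the original code): a rough sketch of how the two
//    per-voxel counters might be reduced on the host, assuming 'N' simulated histories (the exact
//    variance bookkeeping is done in the report functions, which are not shown in this file):
//
//      double sum_E  = ((double)voxels_Edep[v].x) / SCALE_eV;   // Total energy deposited [eV]
//      double sum_E2 = (double)voxels_Edep[v].y;                // Sum of squared deposits [eV^2]
//      double mean   = sum_E / N;                               // Mean energy per history [eV]
//      double sigma  = sqrt(fabs(sum_E2/N - mean*mean) / N);    // Approximate std. dev. of the mean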
////////////////////////////////////////////////////////////////////////////////
//! Tally a radiographic projection image.
//! This function is called whenever a particle escapes the voxelized volume.
//! The code checks if the particle would arrive at the detector if it kept
//! moving in a straight line after exiting the voxels (assuming vacuum enclosure).
//! An ideal image formation model is implemented: each pixel counts the total energy
//! of the x rays that enter the pixel (100% detection efficiency for any energy).
//! The image due to primaries and different kinds of scatter is tallied separately.
//!
//! In the GPU, an atomicAdd() function is used to make sure that multiple threads do
//! not update the same pixel at the same time, which would result in a loss of information.
//! Since the atomicAdd function is only available for 'unsigned long long int' data,
//! the float pixel values are scaled by a factor "SCALE_eV" defined in the header file
//! (eg, #define SCALE_eV 10000.0f) and stored as unsigned long long integers in main
//! memory.
//!
//! WARNING! If the total tallied signal (for all particles) is larger than "1.8e19/SCALE_eV",
//! there will be a bit overflow and the value will be reset to 0 giving bogus results.
//!
//! WARNING! The detector plane should be located outside the voxels bounding box. However, since
//! the particles are moved outside the bbox in the last step, they could cross the detector
//! plane anyway. If the particles are less than 2.0 cm behind the detector, they are moved
//! back and detected. Therefore the detector can be a few cm inside the bbox and still work.
//! If the Woodcock mean free path is larger than the distance from the bbox to the detector,
//! we may lose some particles behind the detector!
//!
//!
//! @param[in] energy X-ray energy
//! @param[in] position Particle position
//! @param[in] direction Particle direction (cosine vectors)
//! @param[in] scatter_state Flag marking primaries, single Compton, single Rayleigh or multiple scattered radiation
//! @param[out] image Integer array containing the image, ie, the pixel values (in tenths of meV)
////////////////////////////////////////////////////////////////////////////////
#ifdef USING_CUDA
__device__
#endif
inline void tally_image(float* energy, float3* position, float3* direction, signed char* scatter_state, unsigned long long int* image, struct source_struct* source_data_SHARED, struct detector_struct* detector_data_SHARED)
{
float dist_detector, rotated_position;
if (detector_data_SHARED->rotation_flag == 1) // --> Initial source direction is not (0,1,0): detector has to be rotated to +Y to find the pixel number
{
// *** Skip particles not moving towards the detector.
// (a) Skip particles that were deflected more than 90 deg from the original source direction (backscatter).
// (b) Skip particles located more than 10 cm behind the detector
// (c) Skip particles for which the distance to the detector plane is much larger than the SDD (likely to intersect the plane outside the pixel region).
// !!DeBuG!! NOTE: This may give problems for big detectors very close to the source
// !!DeBuG!! Particles located after the detector will be moved back to the surface of the detector, but 10 cm maximum!!
// In this way the detector can intersect the voxels bbox or be located right on the surface of the bbox: the particles will be
// transported across the detector and until a little after the end of the bbox in the last step, but then moved back.
// This algorithm will give correct results ONLY when the detector intersects just slightly the air space around the phantom,
// so that the interactions after the detector are not significant (this happens sometimes using oblique beams).
// I could remove particles after the detector using "if (dist_detector<0.0f) return;".
// (a) Calculate the angle between the particle and the initial direction (dot product): reject particle if cos_angle < cos(89)==0 (angle>89deg):
// [Extra parenthesis are coded to suggest to the compiler the use of intrinsic multiply-add operations].
register float cos_angle = direction->x * source_data_SHARED->direction.x +
(direction->y * source_data_SHARED->direction.y +
(direction->z * source_data_SHARED->direction.z));
if (cos_angle < 0.025f)
return; // Reject particle: Angle larger than 89 deg --> particle moving parallel to the detector or backwards towards the source!
// (b) Find the distance from the current particle location (likely just after the surface of the voxel bbox) to the intersection with the detector plane:
dist_detector = ( source_data_SHARED->direction.x * (detector_data_SHARED->center.x - position->x) +
(source_data_SHARED->direction.y * (detector_data_SHARED->center.y - position->y) +
(source_data_SHARED->direction.z * (detector_data_SHARED->center.z - position->z))) ) / cos_angle;
// !!DeBuG!! IF's below (used in v1.2) are not needed when checking the x ray angle:
// if (dist_detector < -10.0f) // !!DeBuG!! Is 10 cm enough or too much? Should I use 0? or allow any distance?
// return; // !!DeBuG!! Reject particles located more than 10 cm behind the detector. 10 cm was selected arbitrarily. Woodcock MFP for x-rays in bone: MFP 200 keV photons in bone ==> 4 cm.
//
// if (fabsf(dist_detector)>(2.1f*detector_data_CONST.sdd))
// return; // Reject particle: distance to the detector plane too large, the particle is likely to travel almost parallel to the detector and will not be detected.
// *** Translate the particle to the detector plane (we assume the detector is completely absorbent: 100% detection efficiency):
position->x = position->x + dist_detector * direction->x;
position->y = position->y + dist_detector * direction->y;
position->z = position->z + dist_detector * direction->z;
// *** Rotate the particle position vector to the default reference system where the detector is perpendicular to the +Y axis, then find out if the particle is located inside a pixel:
#ifdef USING_CUDA
rotated_position = detector_data_SHARED->rot_inv[0]*position->x + detector_data_SHARED->rot_inv[1]*position->y + detector_data_SHARED->rot_inv[2]*position->z; // X coordinate
int pixel_coord_x = __float2int_rd((rotated_position - detector_data_SHARED->corner_min_rotated_to_Y.x) * detector_data_SHARED->inv_pixel_size_X); // Using CUDA intrinsic function to convert float to integer rounding down (towards minus infinite)
if ((pixel_coord_x>-1)&&(pixel_coord_x<detector_data_SHARED->num_pixels.x))
{
rotated_position = detector_data_SHARED->rot_inv[6]*position->x + detector_data_SHARED->rot_inv[7]*position->y + detector_data_SHARED->rot_inv[8]*position->z; // Z coordinate
int pixel_coord_z = __float2int_rd((rotated_position - detector_data_SHARED->corner_min_rotated_to_Y.z) * detector_data_SHARED->inv_pixel_size_Z);
if ((pixel_coord_z>-1)&&(pixel_coord_z<detector_data_SHARED->num_pixels.y))
{
// -- Particle enters the detector! Tally the particle energy in the corresponding pixel (in tenths of meV):
// Using a CUDA atomic function (not available for global floats yet) to read and increase the pixel value in a single instruction, blocking interferences from other threads.
// The offset for the primaries or scatter images are calculated considering that:
// scatter_state=0 for non-scattered, =1 for Compton, =2 for Rayleigh, and =3 for multiple scatter.
atomicAdd(( image + // Pointer to beginning of image array
(int)(*scatter_state) * detector_data_SHARED->total_num_pixels + // Offset to corresponding scatter image
(pixel_coord_x + pixel_coord_z*(detector_data_SHARED->num_pixels.x)) ), // Offset to the corresponding pixel
__float2ull_rn((*energy)*SCALE_eV) ); // Energy arriving at the pixel, scaled by the factor SCALE_eV and rounded.
// The maximum unsigned long long int value is ~1.8e19:
}
}
#else
// CPU version (not using CUDA intrinsics: atomicAdd, fast type casting)
rotated_position = detector_data_SHARED->rot_inv[0]*position->x + detector_data_SHARED->rot_inv[1]*position->y + detector_data_SHARED->rot_inv[2]*position->z; // X coordinate
float pixel_coord_x = floor((rotated_position - detector_data_SHARED->corner_min_rotated_to_Y.x)*detector_data_SHARED->inv_pixel_size_X); // Using float+floor instead of INT to avoid truncation errors for positive and negative values
if ( (pixel_coord_x>-0.1f) && (pixel_coord_x<(detector_data_SHARED->num_pixels.x-0.1f)) ) // Rejecting values negative or bigger than the image size
{
rotated_position = detector_data_SHARED->rot_inv[6]*position->x + detector_data_SHARED->rot_inv[7]*position->y + detector_data_SHARED->rot_inv[8]*position->z; // Z coordinate
float pixel_coord_z = floor((rotated_position - detector_data_SHARED->corner_min_rotated_to_Y.z)*detector_data_SHARED->inv_pixel_size_Z);
if ( (pixel_coord_z>-0.1f) && (pixel_coord_z<(detector_data_SHARED->num_pixels.y-0.1f)) )
image[(int)(((float)*scatter_state)*detector_data_SHARED->total_num_pixels + pixel_coord_x + pixel_coord_z*detector_data_SHARED->num_pixels.x + 0.0001f)]
+= (unsigned long long int)((*energy)*SCALE_eV + 0.5f); // Tally the particle energy in the pixel. This instruction is not thread-safe, but it is ok in sequential CPU code.
}
#endif
}
else // (detector_data_SHARED->rotation_flag != 1) --> Initial source direction is (0,1,0): pixel number and distance can be found easily
{
if (direction->y < 0.0001f)
return; // *** Reject particles not moving towards the detector plane at +Y.
dist_detector = (detector_data_SHARED->center.y - position->y)/(direction->y); // Distance to the intersection with the detector at +Y.
// !!DeBuG!! IF below (v1.2) not needed when checking the angle
// if (dist_detector>(2.1f*detector_data_SHARED->sdd)) return;
#ifdef USING_CUDA
int pixel_coord_x = __float2int_rd((position->x + dist_detector*direction->x - detector_data_SHARED->corner_min_rotated_to_Y.x)*detector_data_SHARED->inv_pixel_size_X);
if ((pixel_coord_x>-1)&&(pixel_coord_x<detector_data_SHARED->num_pixels.x))
{
int pixel_coord_z = __float2int_rd((position->z + dist_detector*direction->z - detector_data_SHARED->corner_min_rotated_to_Y.z)*detector_data_SHARED->inv_pixel_size_Z);
if ((pixel_coord_z>-1)&&(pixel_coord_z<detector_data_SHARED->num_pixels.y))
atomicAdd( ( image + // Pointer to beginning of image array
(int)(*scatter_state) * detector_data_SHARED->total_num_pixels + // Offset to corresponding scatter image
(pixel_coord_x + pixel_coord_z*(detector_data_SHARED->num_pixels.x)) ), // Offset to the corresponding pixel
__float2ull_rn((*energy)*SCALE_eV) ); // Energy arriving at the pixel, scaled by the factor SCALE_eV and rounded.
}
#else
// --Calculate the pixel the xray enters, truncating towards minus infinite and making sure the conversion to int is safe:
float pixel_coord_x = floor((position->x + dist_detector*direction->x - detector_data_SHARED->corner_min_rotated_to_Y.x)*detector_data_SHARED->inv_pixel_size_X);
if ( (pixel_coord_x>-0.1f) && (pixel_coord_x<(detector_data_SHARED->num_pixels.x-0.1f)) )
{
float pixel_coord_z = floor((position->z + dist_detector*direction->z - detector_data_SHARED->corner_min_rotated_to_Y.z)*detector_data_SHARED->inv_pixel_size_Z);
if ( (pixel_coord_z>-0.1f) && (pixel_coord_z<(detector_data_SHARED->num_pixels.y-0.1f)) )
image[(int)(((float)*scatter_state)*detector_data_SHARED->total_num_pixels + pixel_coord_x + pixel_coord_z*detector_data_SHARED->num_pixels.x + 0.0001f)]
+= (unsigned long long int)((*energy)*SCALE_eV + 0.5f); // Truncate the pixel number to INT and round the energy value
}
#endif
}
}
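// -- Illustrative post-processing (not part of the original code): the tallied pixel values are
//    unsigned integers scaled by SCALE_eV; a minimal sketch of how the host could recover the
//    energy collected by one pixel of, e.g., the non-scattered image (offset 0):
//
//      double pixel_energy_eV = ((double)image[pixel]) / SCALE_eV;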
////////////////////////////////////////////////////////////////////////////////
//! Source that creates primary x rays, according to the defined source model.
//! The particles are automatically moved to the surface of the voxel bounding box,
//! to start the tracking inside a real material. If the sampled particle does not
//! enter the voxels, it is left at the focal spot and the main program will check
//! if it arrives at the detector or not.
//!
//! @param[in] source_data Structure describing the source.
//! @param[in] source_energy_data_CONST Global variable in constant memory space describing the source energy spectrum.
//! @param[out] position Initial particle position (particle transported inside the voxel bbox).
//! @param[out] direction Sampled particle direction (cosine vectors).
//! @param[out] energy Sampled energy of the new x ray.
//! @param[in] seed Current seed of the random number generator, required to sample the movement direction.
//! @param[out] absvox Set to <0 if primary particle will not cross the voxels, not changed otherwise (>0).
////////////////////////////////////////////////////////////////////////////////
#ifdef USING_CUDA
__device__
#endif
inline void source(float3* position, float3* direction, float* energy, int2* seed, int* absvox, struct source_struct* source_data_SHARED, struct detector_struct* detector_data_SHARED)
{
// *** Sample the initial x-ray energy following the input energy spectrum using the Walker aliasing algorithm from PENELOPE:
// The following code is equivalent to calling the function "seeki_walker": int sampled_bin = seeki_walker(source_data_CONST.espc_cutoff, source_data_CONST.espc_alias, ranecu(seed), source_data_CONST.num_bins_espc);
int sampled_bin;
float RN = ranecu(seed) * source_energy_data_CONST.num_bins_espc; // Find initial interval (array starting at 0):
#ifdef USING_CUDA
int int_part = __float2int_rd(RN); // -- Integer part (round down)
#else
int int_part = (int)(RN);
#endif
float fraction_part = RN - ((float)int_part); // -- Fractional part
if (fraction_part < source_energy_data_CONST.espc_cutoff[int_part]) // Check if we are in the aliased part
sampled_bin = int_part; // Below the cutoff: return current value
else
sampled_bin = (int)source_energy_data_CONST.espc_alias[int_part]; // Above the cutoff: return alias
// Linear interpolation of the final energy within the sampled energy bin:
*energy = source_energy_data_CONST.espc[sampled_bin] + ranecu(seed) * (source_energy_data_CONST.espc[sampled_bin+1] - source_energy_data_CONST.espc[sampled_bin]);
// *** Sample the initial direction:
do // Iterate sampling if the sampled direction is not acceptable to get a square field at the given phi (rejection sampling): force square field for any phi!!
{
// Using the algorithm used in PENMAIN.f, from penelope 2008 (by F. Salvat).
direction->z = source_data_SHARED->cos_theta_low + ranecu(seed)*source_data_SHARED->D_cos_theta; // direction->z = w = cos(theta_sampled)
register float phi_sampled = source_data_SHARED->phi_low + ranecu(seed)*source_data_SHARED->D_phi;
register float sin_theta_sampled = sqrtf(1.0f - direction->z*direction->z);
float sinphi_sampled, cosphi_sampled;
#ifdef USING_CUDA
sincos(phi_sampled, &sinphi_sampled,&cosphi_sampled); // Calculate the SIN and COS at the same time.
#else
sinphi_sampled = sin(phi_sampled); // Some CPU compilers will be able to use "sincos", but let's be safe.
cosphi_sampled = cos(phi_sampled);
#endif
direction->y = sin_theta_sampled * sinphi_sampled;
direction->x = sin_theta_sampled * cosphi_sampled;
}
while( fabsf(direction->z/(direction->y+1.0e-7f)) > source_data_SHARED->max_height_at_y1cm ); // !!DeBuG!! Force square field for any phi by rejection sampling!! Is it necessary to use the "+1.0e-7f" to prevent possible division by zero???
if (detector_data_SHARED->rotation_flag == 1)
{
// --Initial beam not pointing to (0,1,0), apply rotation:
register float direction_x_tmp = direction->x;
register float direction_y_tmp = direction->y;
direction->x = source_data_SHARED->rot_fan[0]*direction_x_tmp + source_data_SHARED->rot_fan[1]*direction_y_tmp + source_data_SHARED->rot_fan[2]*direction->z;
direction->y = source_data_SHARED->rot_fan[3]*direction_x_tmp + source_data_SHARED->rot_fan[4]*direction_y_tmp + source_data_SHARED->rot_fan[5]*direction->z;
direction->z = source_data_SHARED->rot_fan[6]*direction_x_tmp + source_data_SHARED->rot_fan[7]*direction_y_tmp + source_data_SHARED->rot_fan[8]*direction->z;
}
// Initialize x ray position at focal spot before translation into bbox. Particle stays at the focal spot if no intersection with the bbox is found:
position->x = source_data_SHARED->position.x;
position->y = source_data_SHARED->position.y;
position->z = source_data_SHARED->position.z;
move_to_bbox(position, direction, voxel_data_CONST.size_bbox, absvox); // Move the particle inside the voxel bounding box.
}
////////////////////////////////////////////////////////////////////////////////
//! Functions that moves a particle inside the voxelized geometry bounding box.
//! An EPSILON distance is added to make sure the particles will be clearly inside the bbox,
//! not exactly on the surface.
//!
//! This algorithm makes the following assumptions:
//! - The back lower vertex of the voxel bounding box is always located at the origin: (x0,y0,z0)=(0,0,0).
//! - The initial value of "position" corresponds to the focal spot location.
//! - When a ray is not pointing towards the bbox plane that it should cross according to the sign of the direction,
//! I assign a distance to the intersection =0 instead of the real negative distance. The wall that will be
//! crossed to enter the bbox is always the furthest and therefore a 0 distance will never be used except
//! in the case of a ray starting inside the bbox or outside the bbox and not pointing to any of the 3 planes.
//! In this situation the ray will be transported a 0 distance, meaning that it will stay at the focal spot.
//!
//! (Interesting information on ray-box intersection: http://tog.acm.org/resources/GraphicsGems/gems/RayBox.c)
//!
//! @param[in,out] position Particle position: initially set to the focal spot, returned transported inside the voxel bbox.
//! @param[in] direction Particle direction (cosine vectors).
//! @param[out] intersection_flag Set to <0 if particle outside bbox and will not cross the voxels, not changed otherwise.
//! @param[in] size_bbox Size of the bounding box.
////////////////////////////////////////////////////////////////////////////////
#ifdef USING_CUDA
__device__
#endif
inline void move_to_bbox(float3* position, float3* direction, float3 size_bbox, int* intersection_flag)
{
float dist_y, dist_x, dist_z;
// -Distance to the nearest Y plane:
if ((direction->y) > EPS_SOURCE) // Moving to +Y: check distance to y=0 plane
{
// Check Y=0 (bbox wall):
if (position->y > 0.0f) // The input position must correspond to the focal spot => position->y == source_data_CONST.position[*num_p].y
dist_y = 0.0f; // No intersection with this plane: particle inside or past the box
// The actual distance would be negative but we set it to 0 because we will not move the particle if no intersection exists.
else
dist_y = EPS_SOURCE + (-position->y)/(direction->y); // dist_y > 0 for sure in this case
}
else if ((direction->y) < NEG_EPS_SOURCE)
{
// Check Y=voxel_data_CONST.size_bbox.y:
if (position->y < size_bbox.y)
dist_y = 0.0f; // No intersection with this plane
else
dist_y = EPS_SOURCE + (size_bbox.y - position->y)/(direction->y); // dist_y > 0 for sure in this case
}
else // (direction->y)~0
dist_y = NEG_INF; // Particle moving parallel to the plane: no interaction possible (set impossible negative dist = -INFINITE)
// -Distance to the nearest X plane:
if ((direction->x) > EPS_SOURCE)
{
// Check X=0:
if (position->x > 0.0f)
dist_x = 0.0f;
else
dist_x = EPS_SOURCE + (-position->x)/(direction->x); // dist_x > 0 for sure in this case
}
else if ((direction->x) < NEG_EPS_SOURCE)
{
// Check X=voxel_data_CONST.size_bbox.x:
if (position->x < size_bbox.x)
dist_x = 0.0f;
else
dist_x = EPS_SOURCE + (size_bbox.x - position->x)/(direction->x); // dist_x > 0 for sure in this case
}
else
dist_x = NEG_INF;
// -Distance to the nearest Z plane:
if ((direction->z) > EPS_SOURCE)
{
// Check Z=0:
if (position->z > 0.0f)
dist_z = 0.0f;
else
dist_z = EPS_SOURCE + (-position->z)/(direction->z); // dist_z > 0 for sure in this case
}
else if ((direction->z) < NEG_EPS_SOURCE)
{
// Check Z=voxel_data_CONST.size_bbox.z:
if (position->z < size_bbox.z)
dist_z = 0.0f;
else
dist_z = EPS_SOURCE + (size_bbox.z - position->z)/(direction->z); // dist_z > 0 for sure in this case
}
else
dist_z = NEG_INF;
// -- Find the longest distance plane, which is the one that has to be crossed to enter the bbox.
// Storing the maximum distance in variable "dist_z". Distance will be =0 if no intersection exists or
// if the x ray is already inside the bbox.
if ( (dist_y>dist_x) && (dist_y>dist_z) )
dist_z = dist_y; // dist_z == dist_max
else if (dist_x>dist_z)
dist_z = dist_x;
// else
// dist_max = dist_z;
// -- Move particle from the focal spot (current location) to the bbox wall surface (slightly inside):
position->x += dist_z * direction->x;
position->y += dist_z * direction->y;
position->z += dist_z * direction->z;
// Check if the new position is outside the bbox. If true, the particle must be moved back to the focal spot location:
if ( (position->x < 0.0f) || (position->x > size_bbox.x) ||
(position->y < 0.0f) || (position->y > size_bbox.y) ||
(position->z < 0.0f) || (position->z > size_bbox.z) )
{
position->x -= dist_z * direction->x; // Reject new position undoing the previous translation
position->y -= dist_z * direction->y;
position->z -= dist_z * direction->z;
(*intersection_flag) = -111; // Particle outside the bbox AND not pointing to the bbox: set absvox<0 to skip interaction sampling.
}
}
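// -- Illustrative example (not part of the original code): a focal spot at (-10,5,5) cm emitting
//    along +X towards a 20x20x20 cm bbox gives dist_y = dist_z = NEG_INF (direction parallel to
//    those planes) and dist_x = 10 + EPS_SOURCE, so the particle is advanced to (EPS_SOURCE,5,5),
//    just inside the X=0 wall of the bounding box.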
////////////////////////////////////////////////////////////////////////////////
//! Upper limit of the number of random values sampled in a single track.
#define LEAP_DISTANCE 256
//! Multipliers and moduli for the two MLCG in RANECU.
#define a1_RANECU 40014
#define m1_RANECU 2147483563
#define a2_RANECU 40692
#define m2_RANECU 2147483399
////////////////////////////////////////////////////////////////////////////////
//! Initialize the pseudo-random number generator (PRNG) RANECU to a position
//! far away from the previous history (leap frog technique).
//!
//! Each calculated seed initiates a consecutive and disjoint sequence of
//! pseudo-random numbers with length LEAP_DISTANCE, that can be used
//! in a parallel simulation (Sequence Splitting parallelization method).
//! The basic equation behind the algorithm is:
//! S(i+j) = (a**j * S(i)) MOD m = [(a**j MOD m)*S(i)] MOD m ,
//! which is described in:
//! P L'Ecuyer, Commun. ACM 31 (1988) p.742
//!
//! This function has been adapted from "seedsMLCG.f", see:
//! A Badal and J Sempau, Computer Physics Communications 175 (2006) p. 440-450
//!
//! @param[in] history_batch Particle batch number.
//! @param[in] histories_per_thread Number of histories simulated by each thread in this batch.
//! @param[in] seed_input Initial PRNG seed input (used to initiate both MLCGs in RANECU).
//! @param[out] seed Initial PRNG seeds for the present history.
//!
////////////////////////////////////////////////////////////////////////////////
#ifdef USING_CUDA
__device__
#endif
inline void init_PRNG(int history_batch, int histories_per_thread, int seed_input, int2* seed)
{
// -- Move the RANECU generator to a unique position for the current batch of histories:
// I have to use an "unsigned long long int" value to represent all the simulated histories in all previous batches
// The maximum unsigned long long int value is ~1.8e19: if history >1.8e16 and LEAP_DISTANCE==1000, 'leap' will overflow.
// **** 1st MLCG:
unsigned long long int leap = ((unsigned long long int)(history_batch+1))*(histories_per_thread*LEAP_DISTANCE);
int y = 1;
int z = a1_RANECU;
// -- Calculate the modulo power '(a^leap)MOD(m)' using a divide-and-conquer algorithm adapted to modulo arithmetic
for(;;)
{
// (A2) Halve n, and store the integer part and the residue
if (0!=(leap&01)) // (bit-wise operation for MOD(leap,2), or leap%2 ==> proceed if leap is an odd number) Equivalent: t=(short)(leap%2);
{
leap >>= 1; // Halve n moving the bits 1 position right. Equivalent to: leap=(leap/2);
y = abMODm(m1_RANECU,z,y); // (A3) Multiply y by z: y = [z*y] MOD m
if (0==leap) break; // (A4) leap==0? ==> finish
}
else // (leap is even)
{
leap>>= 1; // Halve leap moving the bits 1 position right. Equivalent to: leap=(leap/2);
}
z = abMODm(m1_RANECU,z,z); // (A5) Square z: z = [z*z] MOD m
}
// AjMODm1 = y; // Exponentiation finished: AjMODm = expMOD = y = a^j
// -- Compute and display the seeds S(i+j), from the present seed S(i), using the previously calculated value of (a^j)MOD(m):
// S(i+j) = [(a**j MOD m)*S(i)] MOD m
// S_i = abMODm(m,S_i,AjMODm)
seed->x = abMODm(m1_RANECU, seed_input, y); // Using the input seed as the starting seed
// **** 2nd MLCG (repeating the previous calculation for the 2nd MLCG parameters):
leap = ((unsigned long long int)(history_batch+1))*(histories_per_thread*LEAP_DISTANCE);
y = 1;
z = a2_RANECU;
for(;;)
{
// (A2) Halve n, and store the integer part and the residue
if (0!=(leap&01)) // (bit-wise operation for MOD(leap,2), or leap%2 ==> proceed if leap is an odd number) Equivalent: t=(short)(leap%2);
{
leap >>= 1; // Halve n moving the bits 1 position right. Equivalent to: leap=(leap/2);
y = abMODm(m2_RANECU,z,y); // (A3) Multiply y by z: y = [z*y] MOD m
if (0==leap) break; // (A4) leap==0? ==> finish
}
else // (leap is even)
{
leap>>= 1; // Halve leap moving the bits 1 position right. Equivalent to: leap=(leap/2);
}
z = abMODm(m2_RANECU,z,z); // (A5) Square z: z = [z*z] MOD m
}
// AjMODm2 = y;
seed->y = abMODm(m2_RANECU, seed_input, y); // Using the input seed as the starting seed
}
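// -- Illustrative usage (not part of the original code): each GPU thread initializes its own
//    disjoint RANECU sub-sequence before tracking, mirroring the call made in track_particles:
//
//      int2 seed;
//      init_PRNG(threadIdx.x + blockIdx.x*blockDim.x, histories_per_thread, seed_input, &seed);
//      float r = ranecu(&seed);   // First PRN of this thread's sub-sequence, in (0,1)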
/////////////////////////////////////////////////////////////////////
//! Calculate "(a1*a2) MOD m" with 32-bit integers and avoiding
//! the possible overflow, using the Russian Peasant approach
//! modulo m and the approximate factoring method, as described
//! in: L'Ecuyer and Cote, ACM Trans. Math. Soft. 17 (1991).
//!
//! This function has been adapted from "seedsMLCG.f", see:
//! Badal and Sempau, Computer Physics Communications 175 (2006)
//!
//! @param[in] m,a,s MLCG parameters
//! @return (a1*a2) MOD m
//
// Input: 0 < a1 < m
// 0 < a2 < m
//
// Return value: (a1*a2) MOD m
//
/////////////////////////////////////////////////////////////////////
#ifdef USING_CUDA
__device__ __host__ // Function will be callable from host and also from device
#endif
inline int abMODm(int m, int a, int s)
{
// CAUTION: the input parameters are modified in the function but should not be returned to the calling function! (pass by value!)
int q, k;
int p = -m; // p is always negative to avoid overflow when adding
// ** Apply the Russian peasant method until "a =< 32768":
while (a>32768) // We assume '32' bit integers (4 bytes): 2^(('32'-2)/2) = 32768
{
if (0!=(a&1)) // Store 's' when 'a' is odd Equivalent code: if (1==(a%2))
{
p += s;
if (p>0) p -= m;
}
a >>= 1; // Half a (move bits 1 position right) Equivalent code: a = a/2;
s = (s-m) + s; // Double s (MOD m)
if (s<0) s += m; // (s is always positive)
}
// ** Employ the approximate factoring method (a is small enough to avoid overflow):
q = (int) m / a;
k = (int) s / q;
s = a*(s-k*q)-k*(m-q*a);
while (s<0)
s += m;
// ** Compute the final result:
p += s;
if (p<0) p += m;
return p;
}
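// -- Illustrative example (not part of the original code): abMODm(7,3,5) skips the Russian peasant
//    loop (a<=32768) and applies the approximate factoring step: q=7/3=2, k=5/2=2,
//    s = 3*(5-2*2) - 2*(7-2*3) = 1, so the function returns (3*5) MOD 7 = 1.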
////////////////////////////////////////////////////////////////////////////////
//! Pseudo-random number generator (PRNG) RANECU returning a float value
//! (single precision version).
//!
//! @param[in,out] seed PRNG seed (seed kept in the calling function and updated here).
//! @return PRN float value in the open interval (0,1)
//!
////////////////////////////////////////////////////////////////////////////////
#ifdef USING_CUDA
__device__
#endif
inline float ranecu(int2* seed)
{
int i1 = (int)(seed->x/53668);
seed->x = 40014*(seed->x-i1*53668)-i1*12211;
int i2 = (int)(seed->y/52774);
seed->y = 40692*(seed->y-i2*52774)-i2*3791;
if (seed->x < 0) seed->x += 2147483563;
if (seed->y < 0) seed->y += 2147483399;
i2 = seed->x-seed->y;
if (i2 < 1) i2 += 2147483562;
#ifdef USING_CUDA
return (__int2float_rn(i2)*4.65661305739e-10f); // 4.65661305739e-10 == 1/2147483563
#else
return ((float)(i2)*4.65661305739e-10f);
#endif
}
////////////////////////////////////////////////////////////////////////////////
//! Pseudo-random number generator (PRNG) RANECU returning a double value.
////////////////////////////////////////////////////////////////////////////////
#ifdef USING_CUDA
__device__
#endif
inline double ranecu_double(int2* seed)
{
int i1 = (int)(seed->x/53668);
seed->x = 40014*(seed->x-i1*53668)-i1*12211;
int i2 = (int)(seed->y/52774);
seed->y = 40692*(seed->y-i2*52774)-i2*3791;
if (seed->x < 0) seed->x += 2147483563;
if (seed->y < 0) seed->y += 2147483399;
i2 = seed->x-seed->y;
if (i2 < 1) i2 += 2147483562;
#ifdef USING_CUDA
return (__int2double_rn(i2)*4.6566130573917692e-10);
#else
return ((double)(i2)*4.6566130573917692e-10);
#endif
}
////////////////////////////////////////////////////////////////////////////////
//! Find the voxel that contains the current position.
//! Report the voxel absolute index and the x,y,z indices.
//! The structure containing the voxel number and size is read from CONSTANT memory.
//!
//! @param[in] position Particle position
//! @param[out] voxel_coord Pointer to three integer values (short3*) that will store the x,y and z voxel indices.
//! @return Returns "absvox", the voxel number where the particle is
//! located (negative if position outside the voxel bbox).
//!
////////////////////////////////////////////////////////////////////////////////
#ifdef USING_CUDA
__device__
#endif
inline int locate_voxel(float3* position, short3* voxel_coord)
{
if ( (position->y < EPS_SOURCE) || (position->y > (voxel_data_CONST.size_bbox.y - EPS_SOURCE)) ||
(position->x < EPS_SOURCE) || (position->x > (voxel_data_CONST.size_bbox.x - EPS_SOURCE)) ||
(position->z < EPS_SOURCE) || (position->z > (voxel_data_CONST.size_bbox.z - EPS_SOURCE)) )
{
// -- Particle escaped the voxelized geometry (using EPS_SOURCE to avoid numerical precision errors):
return -1;
}
// -- Particle inside the voxelized geometry, find current voxel:
// The truncation from float to integer could cause problems for negative coordinates, but this will never happen thanks to the IF at the beginning of this function.
// (no need to use the CUDA function to convert float to integer rounding down (towards minus infinite): __float2int_rd)
register int voxel_coord_x, voxel_coord_y, voxel_coord_z;
#ifdef USING_CUDA
voxel_coord_x = __float2int_rd(position->x * voxel_data_CONST.inv_voxel_size.x);
voxel_coord_y = __float2int_rd(position->y * voxel_data_CONST.inv_voxel_size.y);
voxel_coord_z = __float2int_rd(position->z * voxel_data_CONST.inv_voxel_size.z);
#else
voxel_coord_x = (int)(position->x * voxel_data_CONST.inv_voxel_size.x);
voxel_coord_y = (int)(position->y * voxel_data_CONST.inv_voxel_size.y);
voxel_coord_z = (int)(position->z * voxel_data_CONST.inv_voxel_size.z);
#endif
// Output the voxel coordinates as short int (2 bytes) instead of int (4 bytes) to save registers; avoid type castings in the calculation of the return value.
voxel_coord->x = (short int) voxel_coord_x;
voxel_coord->y = (short int) voxel_coord_y;
voxel_coord->z = (short int) voxel_coord_z;
return (voxel_coord_x + voxel_coord_y*(voxel_data_CONST.num_voxels.x) + voxel_coord_z*(voxel_data_CONST.num_voxels.x)*(voxel_data_CONST.num_voxels.y));
}
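// -- Illustrative note (not part of the original code): the absolute index returned above can be
//    decomposed back into voxel coordinates, assuming Nx = num_voxels.x and Ny = num_voxels.y:
//
//      int z = absvox / (Nx*Ny);
//      int y = (absvox - z*Nx*Ny) / Nx;
//      int x = absvox - z*Nx*Ny - y*Nx;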
//////////////////////////////////////////////////////////////////////
//! Rotates a vector; the rotation is specified by giving
//! the polar and azimuthal angles in the "self-frame", as
//! determined by the vector to be rotated.
//! This function is a literal translation from Fortran to C of
//! PENELOPE (v. 2006) subroutine "DIRECT".
//!
//! @param[in,out] (u,v,w) input vector (=d) in the lab. frame; returns the rotated vector components in the lab. frame
//! @param[in] costh cos(theta), angle between d before and after turn
//! @param[in] phi azimuthal angle (rad) turned by d in its self-frame
//
// Output:
// (u,v,w) -> rotated vector components in the lab. frame
//
// Comments:
// -> (u,v,w) should have norm=1 on input; if not, it is
// renormalized on output, provided norm>0.
// -> The algorithm is based on considering the turned vector
// d' expressed in the self-frame S',
// d' = (sin(th)cos(ph), sin(th)sin(ph), cos(th))
// and then apply a change of frame from S' to the lab
// frame. S' is defined as having its z' axis coincident
// with d, its y' axis perpendicular to z and z' and its
// x' axis equal to y'*z'. The matrix of the change is then
// / uv/rho -v/rho u \
// S ->lab: | vw/rho u/rho v | , rho=(u^2+v^2)^0.5
// \ -rho 0 w /
// -> When rho=0 (w=1 or -1) z and z' are parallel and the y'
// axis cannot be defined in this way. Instead y' is set to
// y and therefore either x'=x (if w=1) or x'=-x (w=-1)
//////////////////////////////////////////////////////////////////////
#ifdef USING_CUDA
__device__
#endif
inline void rotate_double(float3* direction, double costh, double phi) // !!DeBuG!! The direction vector is single precision but the rotation is performed in double precision for increased accuracy.
{
double DXY, NORM, cosphi, sinphi, SDT;
DXY = direction->x*direction->x + direction->y*direction->y;
#ifdef USING_CUDA
sincos(phi, &sinphi,&cosphi); // Calculate the SIN and COS at the same time.
#else
sinphi = sin(phi); // Some CPU compilers will be able to use "sincos", but let's be safe.
cosphi = cos(phi);
#endif
// **** Ensure normalisation
NORM = DXY + direction->z*direction->z; // !!DeBuG!! Check if it is really necessary to renormalize in a real simulation!!
if (fabs(NORM-1.0)>1.0e-14)
{
NORM = 1.0/sqrt(NORM);
direction->x = NORM*direction->x;
direction->y = NORM*direction->y;
direction->z = NORM*direction->z;
DXY = direction->x*direction->x + direction->y*direction->y;
}
if (DXY>1.0e-28)
{
SDT = sqrt((1.0-costh*costh)/DXY);
float direction_x_in = direction->x;
direction->x = direction->x*costh + SDT*(direction_x_in*direction->z*cosphi-direction->y*sinphi);
direction->y = direction->y*costh+SDT*(direction->y*direction->z*cosphi+direction_x_in*sinphi);
direction->z = direction->z*costh-DXY*SDT*cosphi;
}
else
{
SDT = sqrt(1.0-costh*costh);
direction->y = SDT*sinphi;
if (direction->z>0.0)
{
direction->x = SDT*cosphi;
direction->z = costh;
}
else
{
direction->x =-SDT*cosphi;
direction->z =-costh;
}
}
}
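// Worked special case of the code above (illustrative): if the input direction is exactly (0,0,1),
// then DXY=0 and the second branch returns (SDT*cosphi, SDT*sinphi, costh) =
// (sin(theta)*cos(phi), sin(theta)*sin(phi), cos(theta)), i.e. the turned vector d' expressed
// directly in the lab frame, as described in the comment block above.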
//////////////////////////////////////////////////////////////////////
// ***********************************************************************
// * Translation of PENELOPE's "SUBROUTINE GRAa" from FORTRAN77 to C *
// ***********************************************************************
//! Sample a Rayleigh interaction using the sampling algorithm
//! used in PENELOPE 2006.
//!
//! @param[in] energy Particle energy (not modified with Rayleigh)
//! @param[out] costh_Rayleigh Cosine of the angular deflection
//! @param[in] material Current voxel material
//
// CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
// C PENELOPE/PENGEOM (version 2006) C
// C Copyright (c) 2001-2006 C
// C Universitat de Barcelona C
// C Permission to use, copy, modify, distribute and sell this software C
// C and its documentation for any purpose is hereby granted without C
// C fee, provided that the above copyright notice appears in all C
// C copies and that both that copyright notice and this permission C
// C notice appear in all supporting documentation. The Universitat de C
// C Barcelona makes no representations about the suitability of this C
// C software for any purpose. It is provided "as is" without express C
// C or implied warranty. C
// CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
//////////////////////////////////////////////////////////////////////
#ifdef USING_CUDA
__device__
#endif
inline void GRAa(float *energy, double *costh_Rayleigh, int *mat, float *pmax_current, int2 *seed, struct rayleigh_struct* cgra)
{
/* **** Energy grid and interpolation constants for the current energy. */
double xmax = ((double)*energy) * 8.065535669099010e-5; // 8.065535669099010e-5 == 2.0*20.6074/510998.918
double x2max = min_value( (xmax*xmax) , ((double)cgra->xco[(*mat+1)*NP_RAYLEIGH - 1]) ); // Get the last tabulated value of xco for this mat
if (xmax < 0.01)
{
do
{
*costh_Rayleigh = 1.0 - ranecu_double(seed) * 2.0;
}
while ( ranecu_double(seed) > (((*costh_Rayleigh)*(*costh_Rayleigh)+1.0)*0.5) );
return;
}
for(;;) // (Loop will iterate every time the sampled value is rejected or above the maximum)
{
double ru = ranecu_double(seed) * (double)(*pmax_current); // Pmax for the current energy is entered as a parameter
/* **** Selection of the interval (binary search within pre-calculated limits). */
int itn = (int)(ru * (NP_RAYLEIGH-1)); // 'itn' will never reach the last interval 'NP_RAYLEIGH-1', but this is how RITA is implemented in PENELOPE
int i__ = (int)cgra->itlco[itn + (*mat)*NP_RAYLEIGH];
int j = (int)cgra->ituco[itn + (*mat)*NP_RAYLEIGH];
if ((j - i__) > 1)
{
do
{
register int k = (i__ + j)>>1; // >>1 == /2
if (ru > cgra->pco[k -1 + (*mat)*NP_RAYLEIGH])
i__ = k;
else
j = k;
}
while ((j - i__) > 1);
}
/* **** Sampling from the rational inverse cumulative distribution. */
int index = i__ - 1 + (*mat)*NP_RAYLEIGH;
double rr = ru - cgra->pco[index];
double xx;
if (rr > 1e-16)
{
double d__ = (double)(cgra->pco[index+1] - cgra->pco[index]);
float aco_index = cgra->aco[index], bco_index = cgra->bco[index], xco_index = cgra->xco[index]; // Avoid multiple accesses to the same global variable
xx = (double)xco_index + (double)(aco_index + 1.0f + bco_index)* d__* rr / (d__*d__ + (aco_index*d__ + bco_index*rr) * rr) * (double)(cgra->xco[index+1] - xco_index);
}
else
{
xx = cgra->xco[index];
}
if (xx < x2max)
{
// Sampled value below maximum possible value:
*costh_Rayleigh = 1.0 - 2.0 * xx / x2max; // !!DeBuG!! costh_Rayleigh in double precision, but not all intermediate steps are!?
/* **** Rejection: */
if (ranecu_double(seed) < (((*costh_Rayleigh)*(*costh_Rayleigh) + 1.0)*0.5))
break; // Sample value not rejected! break loop and return.
}
}
} /* graa */
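// Usage note (a summary of the code above, not part of the original documentation): for very small
// momentum transfers (xmax < 0.01) the form factor is essentially constant, so costh is sampled
// directly from the Thomson-like distribution by accepting with probability (1+costh^2)/2;
// otherwise x^2 is sampled with the RITA rational inverse transform of the tabulated cumulative
// distribution (pco/aco/bco/xco) and the same (1+costh^2)/2 rejection is applied afterwards.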
//////////////////////////////////////////////////////////////////////////
// ***********************************************************************
// * Translation of PENELOPE's "SUBROUTINE GCOa" from FORTRAN77 to C *
// ***********************************************************************
//! Random sampling of incoherent (Compton) scattering of photons, using
//! the sampling algorithm from PENELOPE 2006:
//! Relativistic impulse approximation with analytical one-electron Compton profiles
// !!DeBuG!! In PENELOPE, Doppler broadening is not used for E greater than 5 MeV.
// We don't use it in the GPU code, to reduce the lines of code and to avoid using COMMON/compos/ZT(M).
//! @param[in,out] energy incident and final photon energy (eV)
//! @param[out] costh_Compton cosine of the polar scattering angle
//! @param[in] material Current voxel material
//! @param[in] seed RANECU PRNG seed
//
// CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
// C PENELOPE/PENGEOM (version 2006) C
// C Copyright (c) 2001-2006 C
// C Universitat de Barcelona C
// C Permission to use, copy, modify, distribute and sell this software C
// C and its documentation for any purpose is hereby granted without C
// C fee, provided that the above copyright notice appears in all C
// C copies and that both that copyright notice and this permission C
// C notice appear in all supporting documentation. The Universitat de C
// C Barcelona makes no representations about the suitability of this C
// C software for any purpose. It is provided "as is" without express C
// C or implied warranty. C
// CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
//
// ************************************************************************
#ifdef USING_CUDA
__device__
#endif
inline void GCOa(float *energy, double *costh_Compton, int *mat, int2 *seed, struct compton_struct* cgco_SHARED)
{
float s, a1, s0, af, ek, ek2, ek3, tau, pzomc, taumin;
float rn[MAX_SHELLS];
double cdt1;
// Some variables used in PENELOPE have been eliminated to save registers: float aux, taum2, fpzmax, a, a2, ek1, rni, xqc, fpz, pac[MAX_SHELLS];
int i__;
int my_noscco = cgco_SHARED->noscco[*mat]; // Store the number of oscillators for the input material in a local variable
#ifndef USING_CUDA
static int warning_flag_1 = -1, warning_flag_2 = -1, warning_flag_3 = -1; // Write warnings for the CPU code, but only once. !!DeBuG!!
#endif
ek = *energy * 1.956951306108245e-6f; // (1.956951306108245e-6 == 1.0/510998.918)
ek2 = ek * 2.f + 1.f;
ek3 = ek * ek;
// ek1 = ek3 - ek2 - 1.;
taumin = 1.f / ek2;
// taum2 = taumin * taumin;
a1 = logf(ek2);
// a2 = a1 + ek * 2. * (ek + 1.) * taum2; // a2 was used only once, code moved below
/* **** Incoherent scattering function for theta=PI. */
s0 = 0.0f;
for (i__ = 0; i__ < my_noscco; i__++)
{
register float temp = cgco_SHARED->uico[*mat + i__*MAX_MATERIALS];
if (temp < *energy)
{
register float aux = *energy * (*energy - temp) * 2.f;
#ifdef USING_CUDA
pzomc = cgco_SHARED->fj0[*mat + i__*MAX_MATERIALS] * (aux - temp * 510998.918f) * rsqrtf(aux + aux + temp * temp) * 1.956951306108245e-6f;
// 1.956951306108245e-6 = 1.0/510998.918f // Version using the reciprocal of sqrt in CUDA: faster and more accurate!!
#else
pzomc = cgco_SHARED->fj0[*mat + i__*MAX_MATERIALS] * (aux - temp * 510998.918f) / (sqrtf(aux + aux + temp * temp) * 510998.918f);
#endif
if (pzomc > 0.0f)
temp = (0.707106781186545f+pzomc*1.4142135623731f) * (0.707106781186545f+pzomc*1.4142135623731f);
else
temp = (0.707106781186545f-pzomc*1.4142135623731f) * (0.707106781186545f-pzomc*1.4142135623731f);
temp = 0.5f * expf(0.5f - temp); // Calculate EXP outside the IF to avoid branching
if (pzomc > 0.0f)
temp = 1.0f - temp;
s0 += cgco_SHARED->fco[*mat + i__*MAX_MATERIALS] * temp;
}
}
/* **** Sampling tau. */
do
{
if (ranecu(seed)*/*a2=*/(a1+2.*ek*(ek+1.f)*taumin*taumin) < a1)
{
tau = powf(taumin, ranecu(seed)); // !!DeBuG!! "powf()" has a big error (7 ULP), the double version has only 2!!
}
else
{
tau = sqrtf(1.f + ranecu(seed) * (taumin * taumin - 1.f));
}
cdt1 = (double)(1.f-tau) / (((double)tau)*((double)*energy)*1.956951306108245e-6); // !!DeBuG!! The sampled COS will be double precision, but TAU is not!!!
if (cdt1 > 2.0) cdt1 = 1.99999999; // !!DeBuG!! Make sure that precision error in POW, SQRT never gives cdt1>2 ==> costh_Compton<-1
/* **** Incoherent scattering function. */
s = 0.0f;
for (i__ = 0; i__ < my_noscco; i__++)
{
register float temp = cgco_SHARED->uico[*mat + i__*MAX_MATERIALS];
if (temp < *energy)
{
register float aux = (*energy) * (*energy - temp) * ((float)cdt1);
if ((aux>1.0e-12f)||(temp>1.0e-12f)) // !!DeBuG!! Make sure the SQRT argument is never <0, and that we never get 0/0 -> NaN when aux=temp=0 !!
{
#ifdef USING_CUDA
pzomc = cgco_SHARED->fj0[*mat + i__*MAX_MATERIALS] * (aux - temp * 510998.918f) * rsqrtf(aux + aux + temp * temp) * 1.956951306108245e-6f;
// 1.956951306108245e-6 = 1.0/510998.918f // Version using the reciprocal of sqrt in CUDA: faster and more accurate!!
#else
pzomc = cgco_SHARED->fj0[*mat + i__*MAX_MATERIALS] * (aux - temp * 510998.918f) / (sqrtf(aux + aux + temp * temp) * 510998.918f);
#endif
}
else
{
pzomc = 0.002f; // !!DeBuG!! Using a rough approximation to a sample value of pzomc found using pure double precision: NOT RIGOROUS! But this code is expected to be used very rarely, only in extreme cases.
#ifndef USING_CUDA
if (warning_flag_1<0)
{
warning_flag_1 = +1; // Disable warning, do not show again
printf(" [... Small numerical precision error detected computing \"pzomc\" in GCOa (this warning will not be repeated).]\n i__=%d, aux=%.14f, temp=%.14f, pzomc(forced)=%.14f, uico=%.14f, energy=%.7f, cgco_SHARED->fj0=%.14f, mat=%d, cdt1=%.14lf\n", (int)i__, aux, temp, pzomc, cgco_SHARED->uico[*mat+i__*MAX_MATERIALS], *energy, cgco_SHARED->fj0[*mat+i__*MAX_MATERIALS], (int)*mat, cdt1); // !!DeBuG!!
}
#endif
}
temp = pzomc * 1.4142135623731f;
if (pzomc > 0.0f)
temp = 0.5f - (temp + 0.70710678118654502f) * (temp + 0.70710678118654502f); // Calculate exponential argument
else
temp = 0.5f - (0.70710678118654502f - temp) * (0.70710678118654502f - temp);
temp = 0.5f * expf(temp); // All threads will calculate the expf together
if (pzomc > 0.0f)
temp = 1.0f - temp;
s += cgco_SHARED->fco[*mat + i__*MAX_MATERIALS] * temp;
rn[i__] = temp;
}
}
} while( (ranecu(seed)*s0) > (s*(1.0f+tau*(/*ek1=*/(ek3 - ek2 - 1.0f)+tau*(ek2+tau*ek3)))/(ek3*tau*(tau*tau+1.0f))) ); // **** Rejection function
*costh_Compton = 1.0 - cdt1;
/* **** Target electron shell. */
for (;;)
{
register float temp = s*ranecu(seed);
float pac = 0.0f;
int ishell = my_noscco - 1; // First shell will have number 0
for (i__ = 0; i__ < (my_noscco-1); i__++) // !!DeBuG!! Iterate to (my_noscco-1) only: the last oscillator is excited in case all others fail (no point in double-checking) ??
{
pac += cgco_SHARED->fco[*mat + i__*MAX_MATERIALS] * rn[i__]; // !!DeBuG!! pac[] is calculated on the fly to save registers!
if (pac > temp) // pac[] is calculated on the fly to save registers!
{
ishell = i__;
break;
}
}
/* **** Projected momentum of the target electron. */
temp = ranecu(seed) * rn[ishell];
if (temp < 0.5f)
{
pzomc = (0.70710678118654502f - sqrtf(0.5f - logf(temp + temp))) / (cgco_SHARED->fj0[*mat + ishell * MAX_MATERIALS] * 1.4142135623731f);
}
else
{
pzomc = (sqrtf(0.5f - logf(2.0f - 2.0f*temp)) - 0.70710678118654502f) / (cgco_SHARED->fj0[*mat + ishell * MAX_MATERIALS] * 1.4142135623731f);
}
if (pzomc < -1.0f)
{
continue; // re-start the loop
}
/* **** F(EP) rejection. */
temp = tau * (tau - (*costh_Compton) * 2.f) + 1.f; // this variable was originally called "xqc"
// af = sqrt( max_value(temp,1.0e-30f) ) * (tau * (tau - *costh_Compton) / max_value(temp,1.0e-30f) + 1.f); //!!DeBuG!! Make sure the SQRT argument is never <0, and that I don't divide by zero!!
if (temp>1.0e-20f) // !!DeBuG!! Make sure the SQRT argument is never <0, and that I don't divide by zero!!
{
af = sqrtf(temp) * (tau * (tau - ((float)(*costh_Compton))) / temp + 1.f);
}
else
{
// When using single precision, it is possible (but very uncommon) to get costh_Compton==1 and tau==1; then temp is 0 and 'af' cannot be calculated (0/0 -> NaN). Analysing the results obtained using double precision, we found that 'af' would be almost 0 in this situation, with an "average" of about ~0.002 (this is just a rough estimate, but using af=0 the value would never be rejected below).
af = 0.00200f; // !!DeBuG!!
#ifndef USING_CUDA
if (warning_flag_2<0)
{
warning_flag_2 = +1; // Disable warning, do not show again
printf(" [... Small numerical precision error detected computing \"af\" in GCOa (this warning will not be repeated)].\n xqc=%.14f, af(forced)=%.14f, tau=%.14f, costh_Compton=%.14lf\n", temp, af, tau, *costh_Compton); // !!DeBuG!!
}
#endif
}
if (af > 0.0f)
{
temp = af * 0.2f + 1.f; // this variable was originally called "fpzmax"
}
else
{
temp = 1.f - af * 0.2f;
}
if ( ranecu(seed)*temp < /*fpz =*/(af * max_value( min_value(pzomc,0.2f) , -0.2f ) + 1.f) )
{
break;
}
}
/* **** Energy of the scattered photon. */
{
register float t, b1, b2, temp;
t = pzomc * pzomc;
b1 = 1.f - t * tau * tau;
b2 = 1.f - t * tau * ((float)(*costh_Compton));
temp = sqrtf( fabsf(b2 * b2 - b1 * (1.0f - t)) );
if (pzomc < 0.0f)
temp *= -1.0f;
// !Error! The energy may increase (slightly) due to an inaccurate calculation! !!DeBuG!!
t = (tau / b1) * (b2 + temp);
if (t > 1.0f)
{
#ifndef USING_CUDA
if (warning_flag_3<0)
{
warning_flag_3 = +1; // Disable warning, do not show again
printf("\n [... a Compton event tried to increase the x ray energy due to precision error. Keeping initial energy. (This warning will not be repeated.)]\n scaling=%.14f, costh_Compton=%.14lf\n", t, *costh_Compton); // !!DeBuG!!
}
#endif
t = 1.0f; // !!DeBuG!! Avoid increasing energy by hand!!! not nice!!
}
(*energy) *= t;
// (*energy) *= (tau / b1) * (b2 + temp); // Original PENELOPE code
}
} // [End subroutine GCOa]
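// Quick kinematic check of the sampling above (illustrative, not part of the original code): for a
// 100 keV photon, ek = 1.0e5/510998.918 ~= 0.196, so taumin = 1/(1+2*ek) ~= 0.72. Since the sampled
// tau = E'/E always lies in [taumin, 1], the scattered photon keeps at least ~72 keV, which matches
// the expected Compton backscatter limit.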
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//! Tally the dose deposited inside each material.
//! This function is called whenever a particle suffers a Compton or photoelectric
//! interaction. The energy released in each interaction is added and later in the
//! report function the total deposited energy is divided by the total mass of the
//! material in the voxelized object to get the dose. This naturally accounts for
//! multiple densities for voxels with the same material (not all voxels have the same mass).
//! Electrons are not transported in MC-GPU and therefore we are approximating
//! that the dose is equal to the KERMA (energy released by the photons alone).
//! This approximation is acceptable when there is electronic equilibrium and
//! when the range of the secondary electrons is shorter than the organ size.
//!
//! The function uses atomic functions for thread-safe access to the GPU memory.
//! We can check whether this tally was disabled in the input file by checking if the array
//! materials_dose was allocated in the GPU (the tally is disabled if the pointer is NULL).
//!
//!
//! @param[in] Edep Energy deposited in the interaction
//! @param[in] material Current material id number
//! @param[out] materials_dose ulonglong2 array storing the materials dose [in eV/g] and dose^2 (i.e., for the uncertainty).
////////////////////////////////////////////////////////////////////////////////
#ifdef USING_CUDA
__device__
#endif
inline
void tally_materials_dose(float* Edep, int* material, ulonglong2* materials_dose)
{
// !!DeBuG!! The energy can be tallied directly with atomicAdd in global memory, or using shared memory first and then global memory for the whole block if that turns out to be too slow. With the initial testing it looks like using global memory is already very fast!
// !!DeBuG!! WARNING: with many histories and few materials the materials_dose integer variables may overflow!! Using double precision floats would be better. Single precision is not good enough because adding small energies to a large counter would cause problems.
#ifdef USING_CUDA
atomicAdd(&materials_dose[*material].x, __float2ull_rn((*Edep)*SCALE_eV) ); // Energy deposited at the material, scaled by the factor SCALE_eV and rounded.
atomicAdd(&materials_dose[*material].y, __float2ull_rn((*Edep)*(*Edep)) ); // Square of the dose to estimate standard deviation (not using SCALE_eV for std_dev to prevent overflow)
#else
materials_dose[*material].x += (unsigned long long int)((*Edep)*SCALE_eV + 0.5f);
materials_dose[*material].y += (unsigned long long int)((*Edep)*(*Edep) + 0.5f);
#endif
return;
}
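// Illustrative call (values assumed): if a photon deposits Edep = 20000.0f eV in a voxel made of
// material 3, tally_materials_dose(&Edep, &mat, materials_dose) atomically adds the scaled energy
// __float2ull_rn(20000*SCALE_eV) to materials_dose[3].x and 20000^2 = 4.0e8 to materials_dose[3].y,
// which the report function later converts into the dose and its statistical uncertainty.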
|
971b1f159bdc61e206ee1b45ccb7216b96238401.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/** @file histo-global.cu histogram with global memory atomics */
#include <assert.h>
#include <png.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <iostream>
using namespace std;
/** CUDA check macro */
#define cucheck(call) \
{\
hipError_t res = (call);\
if(res != hipSuccess) {\
const char* err_str = hipGetErrorString(res);\
fprintf(stderr, "%s (%d): %s in %s", __FILE__, __LINE__, err_str, #call); \
exit(-1);\
}\
}
#define cucheck_dev(call) \
{\
hipError_t res = (call);\
if(res != hipSuccess) {\
const char* err_str = hipGetErrorString(res);\
printf("%s (%d): %s in %s", __FILE__, __LINE__, err_str, #call); \
assert(0); \
}\
}
//Threads per block
#define SUBDIV 32
/** time spent in device */
double gpu_time = 0;
/** a useful function to compute the number of threads */
__host__ __device__ int divup(int x, int y) { return x / y + (x % y ? 1 : 0); }
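// e.g. (illustrative): divup(1000, 32) == 32, i.e. enough 32-thread blocks to cover 1000 rows.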
/** gets the color, given the dwell */
void dwell_color(int *r, int *g, int *b, int dwell);
/** save the dwell into a PNG file
@remarks: code to save PNG file taken from here
(error handling is removed):
http://www.labbookpages.co.uk/software/imgProc/libPNG.html
*/
void save_image(const char *filename, int *dwells, int w, int h) {
png_bytep row;
FILE *fp = fopen(filename, "wb");
png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, 0, 0, 0);
png_infop info_ptr = png_create_info_struct(png_ptr);
// exception handling
setjmp(png_jmpbuf(png_ptr));
png_init_io(png_ptr, fp);
// write header (8 bit colour depth)
png_set_IHDR(png_ptr, info_ptr, w, h,
8, PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE,
PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE);
// set title
png_text title_text;
title_text.compression = PNG_TEXT_COMPRESSION_NONE;
title_text.key = "Title";
title_text.text = "Mandelbrot set, per-pixel";
png_set_text(png_ptr, info_ptr, &title_text, 1);
png_write_info(png_ptr, info_ptr);
// write image data
row = (png_bytep) malloc(3 * w * sizeof(png_byte));
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
int r, g, b;
dwell_color(&r, &g, &b, dwells[y * w + x]);
row[3 * x + 0] = (png_byte)r;
row[3 * x + 1] = (png_byte)g;
row[3 * x + 2] = (png_byte)b;
}
png_write_row(png_ptr, row);
}
png_write_end(png_ptr, NULL);
fclose(fp);
png_free_data(png_ptr, info_ptr, PNG_FREE_ALL, -1);
png_destroy_write_struct(&png_ptr, (png_infopp)NULL);
free(row);
} // save_image
/** a simple complex type */
struct complex {
__host__ __device__ complex(float re, float im = 0) {
this->re = re;
this->im = im;
}
/** real and imaginary part */
float re, im;
}; // struct complex
// operator overloads for complex numbers
inline __host__ __device__ complex operator+
(const complex &a, const complex &b) {
return complex(a.re + b.re, a.im + b.im);
}
inline __host__ __device__ complex operator-
(const complex &a) { return complex(-a.re, -a.im); }
inline __host__ __device__ complex operator-
(const complex &a, const complex &b) {
return complex(a.re - b.re, a.im - b.im);
}
inline __host__ __device__ complex operator*
(const complex &a, const complex &b) {
return complex(a.re * b.re - a.im * b.im, a.im * b.re + a.re * b.im);
}
inline __host__ __device__ float abs2(const complex &a) {
return a.re * a.re + a.im * a.im;
}
inline __host__ __device__ complex operator/
(const complex &a, const complex &b) {
float invabs2 = 1 / abs2(b);
return complex((a.re * b.re + a.im * b.im) * invabs2,
(a.im * b.re - b.im * a.re) * invabs2);
} // operator/
#define MAX_DWELL 256
#define BS 256
/** computes the dwell for a single pixel */
__device__ int pixel_dwell
(int w, int h, complex cmin, complex cmax, int x, int y) {
complex dc = cmax - cmin;
float fx = (float)x / w, fy = (float)y / h;
complex c = cmin + complex(fx * dc.re, fy * dc.im);
int dwell = 0;
complex z = c;
while(dwell < MAX_DWELL && abs2(z) < 2 * 2) {
z = z * z + c;
dwell++;
}
return dwell;
} // pixel_dwell
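/* Example values (illustrative): for c = 0+0i the iterate never escapes, so the loop runs to
MAX_DWELL and returns 256; for c = 2+0i, abs2(z) = 4 already fails the "< 2*2" test and the
function returns 0. */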
__global__ void mandelbrot_k
(int *dwells, int w, int h, complex cmin, complex cmax) {
// complex value to start iteration (c)
int y = threadIdx.x + blockIdx.x * blockDim.x;
for (int x = 0; x < w; x++)
dwells[y * w + x] = pixel_dwell(w, h, cmin, cmax, x, y);
} // mandelbrot_k
/** gets the color, given the dwell (on host) */
#define CUT_DWELL (MAX_DWELL / 4)
void dwell_color(int *r, int *g, int *b, int dwell) {
// black for the Mandelbrot set
if(dwell >= MAX_DWELL) {
*r = *g = *b = 0;
} else {
// cut at zero
if(dwell < 0)
dwell = 0;
if(dwell <= CUT_DWELL) {
// from black to blue the first half
*r = *g = 0;
*b = 128 + dwell * 127 / (CUT_DWELL);
} else {
// from blue to white for the second half
*b = 255;
*r = *g = (dwell - CUT_DWELL) * 255 / (MAX_DWELL - CUT_DWELL);
}
}
} // dwell_color
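/* Illustrative mapping produced by dwell_color (derived from the constants above):
dwell = 0 -> (r,g,b) = (0,0,128); dwell = CUT_DWELL = 64 -> (0,0,255);
dwell >= MAX_DWELL = 256 -> (0,0,0), i.e. points inside the Mandelbrot set are drawn black. */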
int main(int argc, char **argv) {
if (argc != 2)
{
fprintf(stderr, "Provide image size, please.\n");
return 0;
}
// allocate memory
int w = atoi(argv[1])*1024, h = atoi(argv[1])*1024;
size_t dwell_sz = w * h * sizeof(int);
int *h_dwells, *d_dwells;
cucheck(hipMalloc((void**)&d_dwells, dwell_sz));
h_dwells = (int*)malloc(dwell_sz);
// compute the dwells, copy them back
double t1 = omp_get_wtime();
hipLaunchKernelGGL(( mandelbrot_k), dim3(divup(h, SUBDIV)), dim3(SUBDIV), 0, 0,
d_dwells, w, h, complex(-1.5, -1), complex(0.5, 1));
cucheck(hipDeviceSynchronize());
double t2 = omp_get_wtime();
cucheck(hipMemcpy(h_dwells, d_dwells, dwell_sz, hipMemcpyDeviceToHost));
gpu_time = t2 - t1;
// save the image to PNG
save_image("mandelbrot-set-ignore.png", h_dwells, w, h);
// print performance
cout << gpu_time << ' ' << w*h/(1048576*gpu_time) << endl;
// free data
hipFree(d_dwells);
free(h_dwells);
return 0;
} // main
| 971b1f159bdc61e206ee1b45ccb7216b96238401.cu | /** @file histo-global.cu histogram with global memory atomics */
#include <assert.h>
#include <png.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <iostream>
using namespace std;
/** CUDA check macro */
#define cucheck(call) \
{\
cudaError_t res = (call);\
if(res != cudaSuccess) {\
const char* err_str = cudaGetErrorString(res);\
fprintf(stderr, "%s (%d): %s in %s", __FILE__, __LINE__, err_str, #call); \
exit(-1);\
}\
}
#define cucheck_dev(call) \
{\
cudaError_t res = (call);\
if(res != cudaSuccess) {\
const char* err_str = cudaGetErrorString(res);\
printf("%s (%d): %s in %s", __FILE__, __LINE__, err_str, #call); \
assert(0); \
}\
}
//Threads per block
#define SUBDIV 32
/** time spent in device */
double gpu_time = 0;
/** a useful function to compute the number of threads */
__host__ __device__ int divup(int x, int y) { return x / y + (x % y ? 1 : 0); }
/** gets the color, given the dwell */
void dwell_color(int *r, int *g, int *b, int dwell);
/** save the dwell into a PNG file
@remarks: code to save PNG file taken from here
(error handling is removed):
http://www.labbookpages.co.uk/software/imgProc/libPNG.html
*/
void save_image(const char *filename, int *dwells, int w, int h) {
png_bytep row;
FILE *fp = fopen(filename, "wb");
png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, 0, 0, 0);
png_infop info_ptr = png_create_info_struct(png_ptr);
// exception handling
setjmp(png_jmpbuf(png_ptr));
png_init_io(png_ptr, fp);
// write header (8 bit colour depth)
png_set_IHDR(png_ptr, info_ptr, w, h,
8, PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE,
PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE);
// set title
png_text title_text;
title_text.compression = PNG_TEXT_COMPRESSION_NONE;
title_text.key = "Title";
title_text.text = "Mandelbrot set, per-pixel";
png_set_text(png_ptr, info_ptr, &title_text, 1);
png_write_info(png_ptr, info_ptr);
// write image data
row = (png_bytep) malloc(3 * w * sizeof(png_byte));
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
int r, g, b;
dwell_color(&r, &g, &b, dwells[y * w + x]);
row[3 * x + 0] = (png_byte)r;
row[3 * x + 1] = (png_byte)g;
row[3 * x + 2] = (png_byte)b;
}
png_write_row(png_ptr, row);
}
png_write_end(png_ptr, NULL);
fclose(fp);
png_free_data(png_ptr, info_ptr, PNG_FREE_ALL, -1);
png_destroy_write_struct(&png_ptr, (png_infopp)NULL);
free(row);
} // save_image
/** a simple complex type */
struct complex {
__host__ __device__ complex(float re, float im = 0) {
this->re = re;
this->im = im;
}
/** real and imaginary part */
float re, im;
}; // struct complex
// operator overloads for complex numbers
inline __host__ __device__ complex operator+
(const complex &a, const complex &b) {
return complex(a.re + b.re, a.im + b.im);
}
inline __host__ __device__ complex operator-
(const complex &a) { return complex(-a.re, -a.im); }
inline __host__ __device__ complex operator-
(const complex &a, const complex &b) {
return complex(a.re - b.re, a.im - b.im);
}
inline __host__ __device__ complex operator*
(const complex &a, const complex &b) {
return complex(a.re * b.re - a.im * b.im, a.im * b.re + a.re * b.im);
}
inline __host__ __device__ float abs2(const complex &a) {
return a.re * a.re + a.im * a.im;
}
inline __host__ __device__ complex operator/
(const complex &a, const complex &b) {
float invabs2 = 1 / abs2(b);
return complex((a.re * b.re + a.im * b.im) * invabs2,
(a.im * b.re - b.im * a.re) * invabs2);
} // operator/
#define MAX_DWELL 256
#define BS 256
/** computes the dwell for a single pixel */
__device__ int pixel_dwell
(int w, int h, complex cmin, complex cmax, int x, int y) {
complex dc = cmax - cmin;
float fx = (float)x / w, fy = (float)y / h;
complex c = cmin + complex(fx * dc.re, fy * dc.im);
int dwell = 0;
complex z = c;
while(dwell < MAX_DWELL && abs2(z) < 2 * 2) {
z = z * z + c;
dwell++;
}
return dwell;
} // pixel_dwell
__global__ void mandelbrot_k
(int *dwells, int w, int h, complex cmin, complex cmax) {
// complex value to start iteration (c)
int y = threadIdx.x + blockIdx.x * blockDim.x;
for (int x = 0; x < w; x++)
dwells[y * w + x] = pixel_dwell(w, h, cmin, cmax, x, y);
} // mandelbrot_k
/** gets the color, given the dwell (on host) */
#define CUT_DWELL (MAX_DWELL / 4)
void dwell_color(int *r, int *g, int *b, int dwell) {
// black for the Mandelbrot set
if(dwell >= MAX_DWELL) {
*r = *g = *b = 0;
} else {
// cut at zero
if(dwell < 0)
dwell = 0;
if(dwell <= CUT_DWELL) {
// from black to blue the first half
*r = *g = 0;
*b = 128 + dwell * 127 / (CUT_DWELL);
} else {
// from blue to white for the second half
*b = 255;
*r = *g = (dwell - CUT_DWELL) * 255 / (MAX_DWELL - CUT_DWELL);
}
}
} // dwell_color
int main(int argc, char **argv) {
if (argc != 2)
{
fprintf(stderr, "Provide image size, please.\n");
return 0;
}
// allocate memory
int w = atoi(argv[1])*1024, h = atoi(argv[1])*1024;
size_t dwell_sz = w * h * sizeof(int);
int *h_dwells, *d_dwells;
cucheck(cudaMalloc((void**)&d_dwells, dwell_sz));
h_dwells = (int*)malloc(dwell_sz);
// compute the dwells, copy them back
double t1 = omp_get_wtime();
mandelbrot_k<<<divup(h, SUBDIV), SUBDIV>>>
(d_dwells, w, h, complex(-1.5, -1), complex(0.5, 1));
cucheck(cudaThreadSynchronize());
double t2 = omp_get_wtime();
cucheck(cudaMemcpy(h_dwells, d_dwells, dwell_sz, cudaMemcpyDeviceToHost));
gpu_time = t2 - t1;
// save the image to PNG
save_image("mandelbrot-set-ignore.png", h_dwells, w, h);
// print performance
cout << gpu_time << ' ' << w*h/(1048576*gpu_time) << endl;
// free data
cudaFree(d_dwells);
free(h_dwells);
return 0;
} // main
|
52a20a547d6698397716154567a8517cf4ca7253.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <fstream>
using namespace std;
__global__ void addition( float* x, float* y, float* z, int num)
{
const unsigned int tid = threadIdx.x;
const unsigned int bid = blockIdx.x;
const unsigned int bdim = blockDim.x;
const unsigned int gdim = gridDim.x;
int step=bdim*gdim;
for (int id=bid * bdim + tid;id<num;id+=step)
{
z[id]=x[id]+y[id];
}
}
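// Illustrative numbers for the grid-stride loop above (assuming the launch configuration used in
// main below, 256 blocks x 256 threads): step = 65536, so for num = 100 only the first 100 global
// threads do any work and each handles exactly one element; a larger array would be covered in
// strides of 65536 elements per thread.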
int main(void) {
const int num=100;
//array
float *h_x;//variable x(host)
float *h_y;//variable y(host)
float *h_z;//variable z(host)
h_x = (float*) malloc(sizeof( float)*num);
h_y = (float*) malloc(sizeof( float)*num);
h_z = (float*) malloc(sizeof( float)*num);
float *d_x;//variable x(device)
float *d_y;//variable y(device)
float *d_z;//variable z(device)
hipMalloc( (void**) &d_x, sizeof( float) * num );
hipMalloc( (void**) &d_y, sizeof( float) * num );
hipMalloc( (void**) &d_z, sizeof( float) * num );
//initial values
for (int i=0;i<num;i++) {
h_x[i]=i;
h_y[i]=i*2;
}
//copy x and y from CPU to GPU
hipMemcpy( d_x, h_x, sizeof( float) * num , hipMemcpyHostToDevice);
hipMemcpy( d_y, h_y, sizeof( float) * num , hipMemcpyHostToDevice);
dim3 grid(256, 1, 1);
dim3 threads(256, 1, 1);
//execute GPU function
hipLaunchKernelGGL(( addition), dim3(grid), dim3(threads) , 0, 0, d_x, d_y, d_z, num);
//copy z from GPU to CPU
hipMemcpy( h_z, d_z, sizeof( float) * num, hipMemcpyDeviceToHost);
for (int i=0;i<num;i++) {
cout<<h_x[i]<<"\t"<<h_y[i]<<"\t"<<h_z[i]<<"\n";
}
free(h_x);
free(h_y);
free(h_z);
hipFree(d_x);
hipFree(d_y);
hipFree(d_z);
return 0;
}
| 52a20a547d6698397716154567a8517cf4ca7253.cu | #include <iostream>
#include <fstream>
using namespace std;
__global__ void addition( float* x, float* y, float* z, int num)
{
const unsigned int tid = threadIdx.x;
const unsigned int bid = blockIdx.x;
const unsigned int bdim = blockDim.x;
const unsigned int gdim = gridDim.x;
int step=bdim*gdim;
for (int id=bid * bdim + tid;id<num;id+=step)
{
z[id]=x[id]+y[id];
}
}
int main(void) {
const int num=100;
//array
float *h_x;//variable x(host)
float *h_y;//variable y(host)
float *h_z;//variable z(host)
h_x = (float*) malloc(sizeof( float)*num);
h_y = (float*) malloc(sizeof( float)*num);
h_z = (float*) malloc(sizeof( float)*num);
float *d_x;//variable x(device)
float *d_y;//variable y(device)
float *d_z;//variable z(device)
cudaMalloc( (void**) &d_x, sizeof( float) * num );
cudaMalloc( (void**) &d_y, sizeof( float) * num );
cudaMalloc( (void**) &d_z, sizeof( float) * num );
//initial values
for (int i=0;i<num;i++) {
h_x[i]=i;
h_y[i]=i*2;
}
//copy x and y from CPU to GPU
cudaMemcpy( d_x, h_x, sizeof( float) * num , cudaMemcpyHostToDevice);
cudaMemcpy( d_y, h_y, sizeof( float) * num , cudaMemcpyHostToDevice);
dim3 grid(256, 1, 1);
dim3 threads(256, 1, 1);
//execute GPU function
addition<<< grid, threads >>>( d_x, d_y, d_z, num);
//copy z from GPU to CPU
cudaMemcpy( h_z, d_z, sizeof( float) * num, cudaMemcpyDeviceToHost);
for (int i=0;i<num;i++) {
cout<<h_x[i]<<"\t"<<h_y[i]<<"\t"<<h_z[i]<<"\n";
}
free(h_x);
free(h_y);
free(h_z);
cudaFree(d_x);
cudaFree(d_y);
cudaFree(d_z);
return 0;
}
|
746c2ddb32ca9074f2a06923b37ac2963da4a5dd.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Dispatch.h>
#include <ATen/native/ForeachUtils.h>
#include <ATen/native/hip/ForeachFunctors.cuh>
namespace at { namespace native {
std::vector<Tensor> foreach_tensor_add_scalar_kernel_cuda(TensorList tensors, Scalar scalar) {
check_foreach_api_restrictions(tensors);
if (!can_use_fast_route(tensors, scalar)) {
return at::native::foreach_tensor_add_scalar_kernel_slow(tensors, scalar);
}
std::vector<std::vector<at::Tensor>> tensor_lists;
std::vector<at::Tensor> vec_res;
for (const auto& t: tensors) {
vec_res.emplace_back(at::native::empty_like(t));
}
tensor_lists.emplace_back(std::move(tensors.vec()));
tensor_lists.emplace_back(std::move(vec_res));
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kBool, kBFloat16, kHalf, tensors[0].scalar_type(), "foreach_tensor_add_scalar_kernel_cuda", [&]() {
multi_tensor_apply<2>(tensor_lists, AddScalarFunctor<scalar_t>(), scalar.to<scalar_t>());
});
return tensor_lists[1];
}
void foreach_tensor_add_scalar_kernel_cuda_(TensorList tensors, Scalar scalar) {
check_foreach_api_restrictions(tensors);
if (!can_use_fast_route(tensors, scalar)) {
return at::native::foreach_tensor_add_scalar_kernel_slow_(tensors, scalar);
}
std::vector<std::vector<at::Tensor>> tensor_lists;
tensor_lists.emplace_back(std::move(tensors.vec()));
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kBool, kBFloat16, kHalf, tensors[0].scalar_type(), "foreach_tensor_add_scalar_kernel_cuda_", [&]() {
multi_tensor_apply<1>(tensor_lists, AddScalarFunctor_<scalar_t>(), scalar.to<scalar_t>());
});
}
}} // namespace at::native
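// Usage sketch (an assumption, not stated in this file): these kernels back the CUDA fast path of
// the at::_foreach_add(TensorList, Scalar) operator; when can_use_fast_route() fails (e.g. for
// tensors with mismatched properties), the slow per-tensor fallback called above is used instead.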
| 746c2ddb32ca9074f2a06923b37ac2963da4a5dd.cu | #include <ATen/Dispatch.h>
#include <ATen/native/ForeachUtils.h>
#include <ATen/native/cuda/ForeachFunctors.cuh>
namespace at { namespace native {
std::vector<Tensor> foreach_tensor_add_scalar_kernel_cuda(TensorList tensors, Scalar scalar) {
check_foreach_api_restrictions(tensors);
if (!can_use_fast_route(tensors, scalar)) {
return at::native::foreach_tensor_add_scalar_kernel_slow(tensors, scalar);
}
std::vector<std::vector<at::Tensor>> tensor_lists;
std::vector<at::Tensor> vec_res;
for (const auto& t: tensors) {
vec_res.emplace_back(at::native::empty_like(t));
}
tensor_lists.emplace_back(std::move(tensors.vec()));
tensor_lists.emplace_back(std::move(vec_res));
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kBool, kBFloat16, kHalf, tensors[0].scalar_type(), "foreach_tensor_add_scalar_kernel_cuda", [&]() {
multi_tensor_apply<2>(tensor_lists, AddScalarFunctor<scalar_t>(), scalar.to<scalar_t>());
});
return tensor_lists[1];
}
void foreach_tensor_add_scalar_kernel_cuda_(TensorList tensors, Scalar scalar) {
check_foreach_api_restrictions(tensors);
if (!can_use_fast_route(tensors, scalar)) {
return at::native::foreach_tensor_add_scalar_kernel_slow_(tensors, scalar);
}
std::vector<std::vector<at::Tensor>> tensor_lists;
tensor_lists.emplace_back(std::move(tensors.vec()));
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kBool, kBFloat16, kHalf, tensors[0].scalar_type(), "foreach_tensor_add_scalar_kernel_cuda_", [&]() {
multi_tensor_apply<1>(tensor_lists, AddScalarFunctor_<scalar_t>(), scalar.to<scalar_t>());
});
}
}} // namespace at::native
|
88c0130977f223728b076fd95e87671dc7ede26c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include <cmath>
#include "caffe/layers/label_specific_margin_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ArcCosDegree(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = Dtype(acos(in[index]) / M_PI * 180.0);
}
}
template <typename Dtype>
__global__ void CreateMask(const int num, const int dim, const Dtype* label, Dtype* positive_mask, Dtype* negative_mask) {
CUDA_KERNEL_LOOP(index, num) {
int gt = static_cast<int>(label[index]);
positive_mask[index*dim + gt] = Dtype(1);
negative_mask[index*dim + gt] = Dtype(0);
}
}
template <typename Dtype>
__global__ void LabelSpecificSoftMarginForward(const int n, const int dim, const Dtype* bottom_data, const Dtype* label,
Dtype* top_data, Dtype* theta, Dtype margin) {
CUDA_KERNEL_LOOP(index, n) {
int gt = static_cast<int>(label[index]);
theta[index * dim + gt] = acos(bottom_data[index * dim + gt]);
if (margin * theta[index * dim + gt] > M_PI - 1e-2) {
theta[index * dim + gt] = M_PI - 1e-2;
}
top_data[index * dim + gt] = cos(margin * theta[index * dim + gt]);
}
}
template <typename Dtype>
__global__ void LabelSpecificSoftMarginBackward(const int n, const int dim, const Dtype* top_diff, const Dtype* label,
Dtype* bottom_diff, const Dtype* bottom_data, const Dtype* theta, const Dtype* top_data, Dtype margin) {
CUDA_KERNEL_LOOP(index, n) {
int gt = static_cast<int>(label[index]);
Dtype gradient = margin * sin(margin * theta[index * dim + gt]) / sqrt(1 - bottom_data[index * dim + gt] * bottom_data[index * dim + gt] + 1e-12);
gradient = gradient > 2 ? 2 : gradient;//bound the gradient.
gradient = gradient < -2 ? -2 : gradient;
bottom_diff[index * dim + gt] = top_diff[index * dim + gt] * gradient;
}
}
template <typename Dtype>
__global__ void LabelSpecificHardMarginForward(const int n, const int dim, const Dtype* bottom_data, const Dtype* label,
Dtype* top_data, Dtype cos_margin, Dtype sin_margin) {
CUDA_KERNEL_LOOP(index, n) {
int l = static_cast<int>(label[index]);
top_data[index * dim + l] = bottom_data[index * dim + l] * cos_margin -
sqrt(1 - bottom_data[index * dim + l] * bottom_data[index * dim + l] + 1e-12) * sin_margin;
}
}
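// Note on the formula above (added comment, not in the original): this is the angle-addition
// identity cos(theta + m) = cos(theta)*cos(m) - sin(theta)*sin(m), with cos(theta) read from
// bottom_data and sin(theta) recovered as sqrt(1 - cos^2(theta) + 1e-12), i.e. a "hard" additive
// angular margin applied to the ground-truth class score.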
template <typename Dtype>
__global__ void LabelSpecificHardMarginBackward(const int n, const int dim, const Dtype* top_diff, const Dtype* label,
Dtype* bottom_diff, const Dtype* bottom_data, Dtype cos_margin, Dtype sin_margin) {
CUDA_KERNEL_LOOP(index, n) {
int gt = static_cast<int>(label[index]);
Dtype gradient = cos_margin -
bottom_data[index * dim + gt] / sqrt(1 - bottom_data[index * dim + gt] * bottom_data[index * dim + gt] + 1e-12) * sin_margin;
gradient = gradient > 2 ? 2 : gradient;//bound the gradient.
gradient = gradient < -2 ? -2 : gradient;
bottom_diff[index * dim + gt] = top_diff[index * dim + gt] * gradient;
}
}
template <typename Dtype>
__global__ void LabelSpecificHardMarginBackwardToMargin(const int n, const int dim, const Dtype* top_diff, const Dtype* label,
Dtype* margin_diff, const Dtype* top_data) {
CUDA_KERNEL_LOOP(index, n) {
int gt = static_cast<int>(label[index]);
margin_diff[index] = top_diff[index * dim + gt] * sqrt(1 - top_data[index * dim + gt] * top_data[index * dim + gt]);
}
}
template <typename Dtype>
void LabelSpecificMarginLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* label_data = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* margin = this->blobs_[0]->mutable_cpu_data();
int num = bottom[0]->num();
int count = bottom[0]->count();
int dim = count / num;
if (has_margin_base_ && this->phase_ == TRAIN) {
margin[0] = margin_base_ + pow(((Dtype)1. + gamma_ * iter_), power_) - 1;
iter_++;
}
if (has_margin_max_ && this->phase_ == TRAIN) {
margin[0] = ::min(margin[0], margin_max_);
}
if (top.size() == 2 && auto_tune_) {
Dtype *positive_mask_data = positive_mask.mutable_gpu_data();
Dtype *negative_mask_data = negative_mask.mutable_gpu_data();
caffe_gpu_set(count, Dtype(0), positive_mask_data);
caffe_gpu_set(count, Dtype(1), negative_mask_data);
// NOLINT_NEXT_LINE(whitespace/operators)
CreateMask<Dtype> << <CAFFE_GET_BLOCKS(num), CAFFE_CUDA_NUM_THREADS >> > (
num, dim, label_data, positive_mask.mutable_gpu_data(), negative_mask.mutable_gpu_data());
CUDA_POST_KERNEL_CHECK;
Dtype positive_mean;
//Dtype positive_std;
Dtype negative_mean;
//Dtype negative_std;
// NOLINT_NEXT_LINE(whitespace/operators)
ArcCosDegree<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> > (
count, bottom_data, bottom_angle.mutable_gpu_data());
//caffe_gpu_powx(count, bottom_angle.gpu_data(), Dtype(2), bottom_square.mutable_gpu_data());
caffe_gpu_dot(count, bottom_angle.gpu_data(), positive_mask.gpu_data(), &positive_mean);
//caffe_gpu_dot(count, bottom_square.gpu_data(), positive_mask.gpu_data(), &positive_std);
caffe_gpu_dot(count, bottom_angle.gpu_data(), negative_mask.gpu_data(), &negative_mean);
//caffe_gpu_dot(count, bottom_square.gpu_data(), negative_mask.gpu_data(), &negative_std);
positive_mean /= num;
//positive_std = sqrt(positive_std / num - positive_mean * positive_mean);
negative_mean /= num * (dim - 1);
//negative_std = sqrt(negative_std / num / (dim - 1) - negative_mean * negative_mean);
if (this->phase_ == TEST) {
top[1]->mutable_cpu_data()[0] = margin[0];
top[1]->mutable_cpu_data()[1] = positive_mean;
//top[1]->mutable_cpu_data()[2] = positive_std;
top[1]->mutable_cpu_data()[2] = negative_mean;
//top[1]->mutable_cpu_data()[4] = negative_std;
}
else {
if (iter_ == 1) {
margin[1] = positive_mean;
//margin[2] = positive_std;
margin[2] = negative_mean;
//margin[4] = negative_std;
}
else {
margin[1] = 0.99 * margin[1] + 0.01 * positive_mean;
//margin[2] = 0.99 * margin[2] + 0.01 * positive_std;
margin[2] = 0.99 * margin[2] + 0.01 * negative_mean;
//margin[4] = 0.99 * margin[4] + 0.01 * negative_std;
}
//margin[0] = (margin[3] - margin[1]) / (margin[2] + margin[4]) * margin[2];
margin[0] = (margin[2] - margin[1]) / 2;
caffe_copy(3, this->blobs_[0]->cpu_data(), top[1]->mutable_cpu_data());
}
}
if (bottom.size() == 3) {
margin[0] = bottom[2]->cpu_data()[0];
}
if (top.size() >= 2) {
top[1]->mutable_cpu_data()[0] = margin[0];
}
caffe_copy(count, bottom_data, top_data);
if (!margin_on_test_ && this->phase_ == TEST) return;
if (margin[0] != Dtype(0.0)) {
if (type_ == LabelSpecificMarginParameter_MarginType_SOFT) {
// NOLINT_NEXT_LINE(whitespace/operators)
LabelSpecificSoftMarginForward<Dtype> << <CAFFE_GET_BLOCKS(num), CAFFE_CUDA_NUM_THREADS >> > (
num, dim, bottom_data, label_data, top_data, theta.mutable_gpu_data(), margin[0]);
CUDA_POST_KERNEL_CHECK;
}
else {
// NOLINT_NEXT_LINE(whitespace/operators)
LabelSpecificHardMarginForward<Dtype> << <CAFFE_GET_BLOCKS(num), CAFFE_CUDA_NUM_THREADS >> > (
num, dim, bottom_data, label_data, top_data, cos(margin[0] / 180 * M_PI), sin(margin[0] / 180 * M_PI));
CUDA_POST_KERNEL_CHECK;
}
}
}
template <typename Dtype>
void LabelSpecificMarginLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* label_data = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* margin = this->blobs_[0]->mutable_cpu_data();
int num = bottom[0]->num();
int count = bottom[0]->count();
int dim = count / num;
caffe_copy(count, top_diff, bottom_diff);
if (!margin_on_test_ && this->phase_ == TEST) return;
if (margin[0] != Dtype(0.0)) {
if (type_ == LabelSpecificMarginParameter_MarginType_SOFT) {
// NOLINT_NEXT_LINE(whitespace/operators)
LabelSpecificSoftMarginBackward<Dtype> << <CAFFE_GET_BLOCKS(num), CAFFE_CUDA_NUM_THREADS >> > (
num, dim, top_diff, label_data, bottom_diff, bottom_data, theta.gpu_data(), top_data, margin[0]);
CUDA_POST_KERNEL_CHECK;
}
else {
// NOLINT_NEXT_LINE(whitespace/operators)
LabelSpecificHardMarginBackward<Dtype> << <CAFFE_GET_BLOCKS(num), CAFFE_CUDA_NUM_THREADS >> > (
num, dim, top_diff, label_data, bottom_diff, bottom_data, cos(margin[0] / 180 * M_PI), sin(margin[0] / 180 * M_PI));
CUDA_POST_KERNEL_CHECK;
if (bottom.size() == 3 && propagate_down[3]) {
// NOLINT_NEXT_LINE(whitespace/operators)
LabelSpecificHardMarginBackwardToMargin<Dtype> << <CAFFE_GET_BLOCKS(num), CAFFE_CUDA_NUM_THREADS >> > (
num, dim, top_diff, label_data, positive_data.mutable_gpu_data(), top_data);
CUDA_POST_KERNEL_CHECK;
caffe_gpu_dot(num, positive_data.gpu_data(), sum_multiplier_.gpu_data(), bottom[3]->mutable_cpu_data());
}
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(LabelSpecificMarginLayer);
} // namespace caffe
| 88c0130977f223728b076fd95e87671dc7ede26c.cu | #include <algorithm>
#include <vector>
#include <cmath>
#include "caffe/layers/label_specific_margin_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ArcCosDegree(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = Dtype(acos(in[index]) / M_PI * 180.0);
}
}
template <typename Dtype>
__global__ void CreateMask(const int num, const int dim, const Dtype* label, Dtype* positive_mask, Dtype* negative_mask) {
CUDA_KERNEL_LOOP(index, num) {
int gt = static_cast<int>(label[index]);
positive_mask[index*dim + gt] = Dtype(1);
negative_mask[index*dim + gt] = Dtype(0);
}
}
template <typename Dtype>
__global__ void LabelSpecificSoftMarginForward(const int n, const int dim, const Dtype* bottom_data, const Dtype* label,
Dtype* top_data, Dtype* theta, Dtype margin) {
CUDA_KERNEL_LOOP(index, n) {
int gt = static_cast<int>(label[index]);
theta[index * dim + gt] = acos(bottom_data[index * dim + gt]);
if (margin * theta[index * dim + gt] > M_PI - 1e-2) {
theta[index * dim + gt] = M_PI - 1e-2;
}
top_data[index * dim + gt] = cos(margin * theta[index * dim + gt]);
}
}
template <typename Dtype>
__global__ void LabelSpecificSoftMarginBackward(const int n, const int dim, const Dtype* top_diff, const Dtype* label,
Dtype* bottom_diff, const Dtype* bottom_data, const Dtype* theta, const Dtype* top_data, Dtype margin) {
CUDA_KERNEL_LOOP(index, n) {
int gt = static_cast<int>(label[index]);
Dtype gradient = margin * sin(margin * theta[index * dim + gt]) / sqrt(1 - bottom_data[index * dim + gt] * bottom_data[index * dim + gt] + 1e-12);
gradient = gradient > 2 ? 2 : gradient;//bound the gradient.
gradient = gradient < -2 ? -2 : gradient;
bottom_diff[index * dim + gt] = top_diff[index * dim + gt] * gradient;
}
}
template <typename Dtype>
__global__ void LabelSpecificHardMarginForward(const int n, const int dim, const Dtype* bottom_data, const Dtype* label,
Dtype* top_data, Dtype cos_margin, Dtype sin_margin) {
CUDA_KERNEL_LOOP(index, n) {
int l = static_cast<int>(label[index]);
top_data[index * dim + l] = bottom_data[index * dim + l] * cos_margin -
sqrt(1 - bottom_data[index * dim + l] * bottom_data[index * dim + l] + 1e-12) * sin_margin;
}
}
template <typename Dtype>
__global__ void LabelSpecificHardMarginBackward(const int n, const int dim, const Dtype* top_diff, const Dtype* label,
Dtype* bottom_diff, const Dtype* bottom_data, Dtype cos_margin, Dtype sin_margin) {
CUDA_KERNEL_LOOP(index, n) {
int gt = static_cast<int>(label[index]);
Dtype gradient = cos_margin -
bottom_data[index * dim + gt] / sqrt(1 - bottom_data[index * dim + gt] * bottom_data[index * dim + gt] + 1e-12) * sin_margin;
gradient = gradient > 2 ? 2 : gradient;//bound the gradient.
gradient = gradient < -2 ? -2 : gradient;
bottom_diff[index * dim + gt] = top_diff[index * dim + gt] * gradient;
}
}
template <typename Dtype>
__global__ void LabelSpecificHardMarginBackwardToMargin(const int n, const int dim, const Dtype* top_diff, const Dtype* label,
Dtype* margin_diff, const Dtype* top_data) {
CUDA_KERNEL_LOOP(index, n) {
int gt = static_cast<int>(label[index]);
margin_diff[index] = top_diff[index * dim + gt] * sqrt(1 - top_data[index * dim + gt] * top_data[index * dim + gt]);
}
}
template <typename Dtype>
void LabelSpecificMarginLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* label_data = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* margin = this->blobs_[0]->mutable_cpu_data();
int num = bottom[0]->num();
int count = bottom[0]->count();
int dim = count / num;
if (has_margin_base_ && this->phase_ == TRAIN) {
margin[0] = margin_base_ + pow(((Dtype)1. + gamma_ * iter_), power_) - 1;
iter_++;
}
if (has_margin_max_ && this->phase_ == TRAIN) {
margin[0] = std::min(margin[0], margin_max_);
}
if (top.size() == 2 && auto_tune_) {
Dtype *positive_mask_data = positive_mask.mutable_gpu_data();
Dtype *negative_mask_data = negative_mask.mutable_gpu_data();
caffe_gpu_set(count, Dtype(0), positive_mask_data);
caffe_gpu_set(count, Dtype(1), negative_mask_data);
// NOLINT_NEXT_LINE(whitespace/operators)
CreateMask<Dtype> << <CAFFE_GET_BLOCKS(num), CAFFE_CUDA_NUM_THREADS >> > (
num, dim, label_data, positive_mask.mutable_gpu_data(), negative_mask.mutable_gpu_data());
CUDA_POST_KERNEL_CHECK;
Dtype positive_mean;
//Dtype positive_std;
Dtype negative_mean;
//Dtype negative_std;
// NOLINT_NEXT_LINE(whitespace/operators)
ArcCosDegree<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> > (
count, bottom_data, bottom_angle.mutable_gpu_data());
//caffe_gpu_powx(count, bottom_angle.gpu_data(), Dtype(2), bottom_square.mutable_gpu_data());
caffe_gpu_dot(count, bottom_angle.gpu_data(), positive_mask.gpu_data(), &positive_mean);
//caffe_gpu_dot(count, bottom_square.gpu_data(), positive_mask.gpu_data(), &positive_std);
caffe_gpu_dot(count, bottom_angle.gpu_data(), negative_mask.gpu_data(), &negative_mean);
//caffe_gpu_dot(count, bottom_square.gpu_data(), negative_mask.gpu_data(), &negative_std);
positive_mean /= num;
//positive_std = sqrt(positive_std / num - positive_mean * positive_mean);
negative_mean /= num * (dim - 1);
//negative_std = sqrt(negative_std / num / (dim - 1) - negative_mean * negative_mean);
if (this->phase_ == TEST) {
top[1]->mutable_cpu_data()[0] = margin[0];
top[1]->mutable_cpu_data()[1] = positive_mean;
//top[1]->mutable_cpu_data()[2] = positive_std;
top[1]->mutable_cpu_data()[2] = negative_mean;
//top[1]->mutable_cpu_data()[4] = negative_std;
}
else {
if (iter_ == 1) {
margin[1] = positive_mean;
//margin[2] = positive_std;
margin[2] = negative_mean;
//margin[4] = negative_std;
}
else {
margin[1] = 0.99 * margin[1] + 0.01 * positive_mean;
//margin[2] = 0.99 * margin[2] + 0.01 * positive_std;
margin[2] = 0.99 * margin[2] + 0.01 * negative_mean;
//margin[4] = 0.99 * margin[4] + 0.01 * negative_std;
}
//margin[0] = (margin[3] - margin[1]) / (margin[2] + margin[4]) * margin[2];
margin[0] = (margin[2] - margin[1]) / 2;
caffe_copy(3, this->blobs_[0]->cpu_data(), top[1]->mutable_cpu_data());
}
}
if (bottom.size() == 3) {
margin[0] = bottom[2]->cpu_data()[0];
}
if (top.size() >= 2) {
top[1]->mutable_cpu_data()[0] = margin[0];
}
caffe_copy(count, bottom_data, top_data);
if (!margin_on_test_ && this->phase_ == TEST) return;
if (margin[0] != Dtype(0.0)) {
if (type_ == LabelSpecificMarginParameter_MarginType_SOFT) {
// NOLINT_NEXT_LINE(whitespace/operators)
LabelSpecificSoftMarginForward<Dtype> << <CAFFE_GET_BLOCKS(num), CAFFE_CUDA_NUM_THREADS >> > (
num, dim, bottom_data, label_data, top_data, theta.mutable_gpu_data(), margin[0]);
CUDA_POST_KERNEL_CHECK;
}
else {
// NOLINT_NEXT_LINE(whitespace/operators)
LabelSpecificHardMarginForward<Dtype> << <CAFFE_GET_BLOCKS(num), CAFFE_CUDA_NUM_THREADS >> > (
num, dim, bottom_data, label_data, top_data, cos(margin[0] / 180 * M_PI), sin(margin[0] / 180 * M_PI));
CUDA_POST_KERNEL_CHECK;
}
}
}
template <typename Dtype>
void LabelSpecificMarginLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* label_data = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* margin = this->blobs_[0]->mutable_cpu_data();
int num = bottom[0]->num();
int count = bottom[0]->count();
int dim = count / num;
caffe_copy(count, top_diff, bottom_diff);
if (!margin_on_test_ && this->phase_ == TEST) return;
if (margin[0] != Dtype(0.0)) {
if (type_ == LabelSpecificMarginParameter_MarginType_SOFT) {
// NOLINT_NEXT_LINE(whitespace/operators)
LabelSpecificSoftMarginBackward<Dtype> << <CAFFE_GET_BLOCKS(num), CAFFE_CUDA_NUM_THREADS >> > (
num, dim, top_diff, label_data, bottom_diff, bottom_data, theta.gpu_data(), top_data, margin[0]);
CUDA_POST_KERNEL_CHECK;
}
else {
// NOLINT_NEXT_LINE(whitespace/operators)
LabelSpecificHardMarginBackward<Dtype> << <CAFFE_GET_BLOCKS(num), CAFFE_CUDA_NUM_THREADS >> > (
num, dim, top_diff, label_data, bottom_diff, bottom_data, cos(margin[0] / 180 * M_PI), sin(margin[0] / 180 * M_PI));
CUDA_POST_KERNEL_CHECK;
if (bottom.size() == 3 && propagate_down[3]) {
// NOLINT_NEXT_LINE(whitespace/operators)
LabelSpecificHardMarginBackwardToMargin<Dtype> << <CAFFE_GET_BLOCKS(num), CAFFE_CUDA_NUM_THREADS >> > (
num, dim, top_diff, label_data, positive_data.mutable_gpu_data(), top_data);
CUDA_POST_KERNEL_CHECK;
caffe_gpu_dot(num, positive_data.gpu_data(), sum_multiplier_.gpu_data(), bottom[3]->mutable_cpu_data());
}
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(LabelSpecificMarginLayer);
} // namespace caffe
|
337c09491f7e8c05ca4e9574e235e321f5861ecb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "constants.h"
__global__ void sum2Kernel(double *d_out, double c1Val, double *d_inp1, double c2Val, double *d_inp2, int len)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if ( idx < len )
d_out[idx] = c1Val * d_inp1[idx] + c2Val * d_inp2[idx];
return;
}
__global__ void sum3Kernel(double *d_out, double c1Val, double *d_inp1, double c2Val, double *d_inp2,
double c3Val, double *d_inp3, int len)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if ( idx < len )
{
d_out[idx] = c1Val * d_inp1[idx] + c2Val * d_inp2[idx]
+ c3Val * d_inp3[idx];
}
return;
}
__global__ void sum4Kernel(double *d_out, double c1Val, double *d_inp1, double c2Val, double *d_inp2,
double c3Val, double *d_inp3, double c4Val, double *d_inp4, int len)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if ( idx < len )
{
d_out[idx] = c1Val * d_inp1[idx] + c2Val * d_inp2[idx]
+ c3Val * d_inp3[idx] + c4Val * d_inp4[idx];
}
return;
}
void vectorSum(double *d_out, double c1Val, double *d_inp1, double c2Val, double *d_inp2, int len)
{
int blkNum = (len - 1) / BLKDIM + 1;
hipLaunchKernelGGL(( sum2Kernel) , dim3(blkNum), dim3(BLKDIM), 0, 0, d_out, c1Val, d_inp1, c2Val, d_inp2, len);
return;
}
void vectorSum(double *d_out, double c1Val, double *d_inp1, double c2Val, double *d_inp2,
double c3Val, double *d_inp3, int len)
{
int blkNum = (len - 1) / BLKDIM + 1;
hipLaunchKernelGGL(( sum3Kernel) , dim3(blkNum), dim3(BLKDIM), 0, 0, d_out, c1Val, d_inp1, c2Val, d_inp2, c3Val, d_inp3, len);
return;
}
void vectorSum(double *d_out, double c1Val, double *d_inp1, double c2Val, double *d_inp2,
double c3Val, double *d_inp3, double c4Val, double *d_inp4, int len)
{
int blkNum = (len - 1) / BLKDIM + 1;
hipLaunchKernelGGL(( sum4Kernel) , dim3(blkNum), dim3(BLKDIM), 0, 0, d_out, c1Val, d_inp1, c2Val, d_inp2, c3Val, d_inp3, c4Val, d_inp4, len);
return;
}
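// Usage sketch (illustrative): vectorSum(d_out, 1.0, d_a, -1.0, d_b, n) computes the element-wise
// difference d_out[i] = d_a[i] - d_b[i] on the device for all i in [0, n).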
| 337c09491f7e8c05ca4e9574e235e321f5861ecb.cu | #include "constants.h"
__global__ void sum2Kernel(double *d_out, double c1Val, double *d_inp1, double c2Val, double *d_inp2, int len)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if ( idx < len )
d_out[idx] = c1Val * d_inp1[idx] + c2Val * d_inp2[idx];
return;
}
__global__ void sum3Kernel(double *d_out, double c1Val, double *d_inp1, double c2Val, double *d_inp2,
double c3Val, double *d_inp3, int len)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if ( idx < len )
{
d_out[idx] = c1Val * d_inp1[idx] + c2Val * d_inp2[idx]
+ c3Val * d_inp3[idx];
}
return;
}
__global__ void sum4Kernel(double *d_out, double c1Val, double *d_inp1, double c2Val, double *d_inp2,
double c3Val, double *d_inp3, double c4Val, double *d_inp4, int len)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if ( idx < len )
{
d_out[idx] = c1Val * d_inp1[idx] + c2Val * d_inp2[idx]
+ c3Val * d_inp3[idx] + c4Val * d_inp4[idx];
}
return;
}
void vectorSum(double *d_out, double c1Val, double *d_inp1, double c2Val, double *d_inp2, int len)
{
int blkNum = (len - 1) / BLKDIM + 1;
sum2Kernel <<<blkNum, BLKDIM>>> (d_out, c1Val, d_inp1, c2Val, d_inp2, len);
return;
}
void vectorSum(double *d_out, double c1Val, double *d_inp1, double c2Val, double *d_inp2,
double c3Val, double *d_inp3, int len)
{
int blkNum = (len - 1) / BLKDIM + 1;
sum3Kernel <<<blkNum, BLKDIM>>> (d_out, c1Val, d_inp1, c2Val, d_inp2, c3Val, d_inp3, len);
return;
}
void vectorSum(double *d_out, double c1Val, double *d_inp1, double c2Val, double *d_inp2,
double c3Val, double *d_inp3, double c4Val, double *d_inp4, int len)
{
int blkNum = (len - 1) / BLKDIM + 1;
sum4Kernel <<<blkNum, BLKDIM>>> (d_out, c1Val, d_inp1, c2Val, d_inp2, c3Val, d_inp3, c4Val, d_inp4, len);
return;
}
|
e93cf15718eb6975051e3efc80e30b162a5d1b9f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void smem_dynamic_test(int * in, int * out, int size)
{
int tid = threadIdx.x;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ int smem[];
if (gid < size)
{
smem[tid] = in[gid];
out[gid] = smem[tid];
}
} | e93cf15718eb6975051e3efc80e30b162a5d1b9f.cu | #include "includes.h"
__global__ void smem_dynamic_test(int * in, int * out, int size)
{
int tid = threadIdx.x;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ int smem[];
if (gid < size)
{
smem[tid] = in[gid];
out[gid] = smem[tid];
}
} |
0f269a316cdca0180db5e4ae91b71da9c1f2eccc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int index = numCols * blockIdx.x + threadIdx.x;
uchar4 color = *(rgbaImage + index);
unsigned char grey = (unsigned char)(0.299f * color.x + 0.587f * color.y + 0.114f * color.z);
greyImage[index] = grey;
}
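// --- Illustrative sketch (not part of the original solution) ---------------
// The kernel above relies on a one-block-per-row launch (one thread per column),
// which only works while numCols <= 1024. As a hedged alternative, the 2D
// block/grid mapping mentioned in its comments could look like this; the kernel
// name and the 16x16 tile size suggested below are assumptions, not part of the file.
__global__
void rgba_to_greyscale_2d(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if (row >= numRows || col >= numCols)
return;
int index = row * numCols + col;
uchar4 color = rgbaImage[index];
greyImage[index] = (unsigned char)(0.299f * color.x + 0.587f * color.y + 0.114f * color.z);
}
// launch example (assumed): dim3 block(16, 16); dim3 grid((numCols + 15) / 16, (numRows + 15) / 16);
// ----------------------------------------------------------------------------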
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const dim3 blockSize(numCols, 1, 1);
const dim3 gridSize(numRows, 1, 1);
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
| 0f269a316cdca0180db5e4ae91b71da9c1f2eccc.cu | // Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int index = numCols * blockIdx.x + threadIdx.x;
uchar4 color = *(rgbaImage + index);
unsigned char grey = (unsigned char)(0.299f * color.x + 0.587f * color.y + 0.114f * color.z);
greyImage[index] = grey;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const dim3 blockSize(numCols, 1, 1);
const dim3 gridSize(numRows, 1, 1);
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
fc1c735fbd1cfd622a5976b15220b88d269ce3f6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
hipError_t err = hipGetLastError();
if (hipSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
bools[index] = idata[index] == 0 ? 0 : 1;
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
if (bools[index] == 1) {
odata[indices[index]] = idata[index];
}
}
}
}
| fc1c735fbd1cfd622a5976b15220b88d269ce3f6.cu | #include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
bools[index] = idata[index] == 0 ? 0 : 1;
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
if (bools[index] == 1) {
odata[indices[index]] = idata[index];
}
}
}
}
|
7323bf7832175cede4469e5b0be076b659fc0e3f.hip | // !!! This is a file automatically generated by hipify!!!
#include "pagerank.h"
// float
void setResDesc(hipResourceDesc &resDesc, float *d_error) {
resDesc.res.linear.desc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
}
// double
void setResDesc(hipResourceDesc &resDesc, double *d_error) {
resDesc.res.linear.desc = hipCreateChannelDesc(32, 32, 0, 0, hipChannelFormatKindSigned);
}
| 7323bf7832175cede4469e5b0be076b659fc0e3f.cu | #include "pagerank.h"
// Set texture object resource descriptor (float overload)
void setResDesc(cudaResourceDesc &resDesc, float *d_error) {
resDesc.res.linear.desc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
}
// Set texture object resource descriptor (double overload)
void setResDesc(cudaResourceDesc &resDesc, double *d_error) {
resDesc.res.linear.desc = cudaCreateChannelDesc(32, 32, 0, 0, cudaChannelFormatKindSigned);
}
|
dc7a2441018d51b87a2935e67befb8c9a7d8d19f.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include "string.h"
#include <hip/hip_runtime.h>
#include "hip/hip_runtime_api.h"
//===========================================================================//
void describe ( int device )
{
hipDeviceProp_t device_properties;
::memset( &device_properties, 0, sizeof(device_properties));
std::cout << "***************************************"
<< "***************************************" << std::endl;
std::cout << "Device number: " << device << std::endl;
if ( hipSuccess ==
hipGetDeviceProperties( &device_properties, device ) )
{
std::cout << "name: "
<< "ASCII string identifying device: "
<< device_properties.name << std::endl;
std::cout << "totalGlobalMem: "
<< "Global memory available on device in bytes: "
<< device_properties.totalGlobalMem << std::endl;
std::cout << "sharedMemPerBlock: "
<< "Shared memory available per block in bytes: "
<< device_properties.sharedMemPerBlock << std::endl;
std::cout << "regsPerBlock: "
<< "32-bit registers available per block: "
<< device_properties.regsPerBlock << std::endl;
std::cout << "warpSize: "
<< "Warp size in threads: "
<< device_properties.warpSize << std::endl;
std::cout << "memPitch: "
<< "Maximum pitch in bytes allowed by memory copies: "
<< device_properties.memPitch << std::endl;
std::cout << "maxThreadsPerBlock: "
<< "Maximum number of threads per block: "
<< device_properties.maxThreadsPerBlock << std::endl;
std::cout << "maxThreadsDim[3]: "
<< "Maximum size of each dimension of a block: "
<< device_properties.maxThreadsDim[0] << " "
<< device_properties.maxThreadsDim[1] << " "
<< device_properties.maxThreadsDim[2] << std::endl;
std::cout << "maxGridSize[3]: "
<< "Maximum size of each dimension of a grid: "
<< device_properties.maxGridSize[0] << " "
<< device_properties.maxGridSize[1] << " "
<< device_properties.maxGridSize[2] << std::endl;
std::cout << "clockRate: "
<< "Clock frequency in kilohertz: "
<< device_properties.clockRate << std::endl;
std::cout << "totalConstMem: "
<< "Constant memory available on device in bytes: "
<< device_properties.totalConstMem << std::endl;
std::cout << "major: "
<< "Major compute capability: "
<< device_properties.major << std::endl;
std::cout << "minor: "
<< "Minor compute capability: "
<< device_properties.minor << std::endl;
std::cout << "textureAlignment: "
<< "Alignment requirement for textures: "
<< device_properties.textureAlignment << std::endl;
std::cout << "deviceOverlap: "
<< "Device can concurrently copy memory and execute a kernel: "
<< device_properties.deviceOverlap << std::endl;
std::cout << "multiProcessorCount: "
<< "Number of multiprocessors on device: "
<< device_properties.multiProcessorCount << std::endl;
std::cout << "kernelExecTimeoutEnable: "
<< "Specified whether there is a run time limit on kernels: "
<< device_properties.kernelExecTimeoutEnabled << std::endl;
std::cout << "integrated: "
<< "Device is integrated as opposed to discrete: "
<< device_properties.integrated << std::endl;
std::cout << "canMapHostMemory: "
<< "Device can map host memory with hipHostMalloc/hipHostGetDevicePointer: "
<< device_properties.canMapHostMemory << std::endl;
std::cout << "computeMode: "
<< "Compute mode (See ::hipComputeMode): "
<< device_properties.computeMode << std::endl;
#define OUTPUT(NAME,DESC) \
std::cout << #NAME << ": " << DESC << " " << device_properties.NAME << std::endl;
OUTPUT(surfaceAlignment,"the alignment requirements for surfaces.")
OUTPUT(concurrentKernels,"is 1 if the device supports executing multiple kernels within the same context simultaneously, or 0 if not. It is not guaranteed that multiple kernels will be resident on the device concurrently so this feature should not be relied upon for correctness")
OUTPUT(ECCEnabled,"is 1 if the device has ECC support turned on, or 0 if not.")
OUTPUT(pciBusID,"the PCI bus identifier of the device")
OUTPUT(pciDeviceID,"the PCI device (sometimes called slot) identifier of the device")
OUTPUT(pciDomainID,"the PCI domain identifier of the device")
OUTPUT(tccDriver,"1 if the device is using a TCC driver or 0 if not")
OUTPUT(asyncEngineCount,"1 when the device can concurrently copy memory between host and device while executing a kernel. It is 2 when the device can concurrently copy memory between host and device in both directions and execute a kernel at the same time. It is 0 if neither of these is supported.")
OUTPUT(unifiedAddressing,"1 if the device shares a unified address space with the host and 0 otherwise")
OUTPUT(memoryClockRate,"the peak memory clock frequency in kilohertz")
OUTPUT(memoryBusWidth,"the memory bus width in bits")
OUTPUT(l2CacheSize,"L2 cache size in bytes")
OUTPUT(maxThreadsPerMultiProcessor,"the number of maximum resident threads per multiprocessor")
}
std::cout << "***************************************"
<< "***************************************" << std::endl;
}
//===========================================================================//
int get_count ()
{
int num_devices = 0;
::hipGetDeviceCount( &num_devices );
return num_devices;
}
//===========================================================================//
void describe ()
{
for ( int device=0; device < get_count(); ++device )
{
describe( device );
}
}
//===========================================================================//
int main ()
{
describe();
}
//===========================================================================//
| dc7a2441018d51b87a2935e67befb8c9a7d8d19f.cu |
#include <iostream>
#include "string.h"
#include <cuda.h>
#include "cuda_runtime_api.h"
//===========================================================================//
void describe ( int device )
{
cudaDeviceProp device_properties;
::memset( &device_properties, 0, sizeof(device_properties));
std::cout << "***************************************"
<< "***************************************" << std::endl;
std::cout << "Device number: " << device << std::endl;
if ( cudaSuccess ==
cudaGetDeviceProperties( &device_properties, device ) )
{
std::cout << "name: "
<< "ASCII string identifying device: "
<< device_properties.name << std::endl;
std::cout << "totalGlobalMem: "
<< "Global memory available on device in bytes: "
<< device_properties.totalGlobalMem << std::endl;
std::cout << "sharedMemPerBlock: "
<< "Shared memory available per block in bytes: "
<< device_properties.sharedMemPerBlock << std::endl;
std::cout << "regsPerBlock: "
<< "32-bit registers available per block: "
<< device_properties.regsPerBlock << std::endl;
std::cout << "warpSize: "
<< "Warp size in threads: "
<< device_properties.warpSize << std::endl;
std::cout << "memPitch: "
<< "Maximum pitch in bytes allowed by memory copies: "
<< device_properties.memPitch << std::endl;
std::cout << "maxThreadsPerBlock: "
<< "Maximum number of threads per block: "
<< device_properties.maxThreadsPerBlock << std::endl;
std::cout << "maxThreadsDim[3]: "
<< "Maximum size of each dimension of a block: "
<< device_properties.maxThreadsDim[0] << " "
<< device_properties.maxThreadsDim[1] << " "
<< device_properties.maxThreadsDim[2] << std::endl;
std::cout << "maxGridSize[3]: "
<< "Maximum size of each dimension of a grid: "
<< device_properties.maxGridSize[0] << " "
<< device_properties.maxGridSize[1] << " "
<< device_properties.maxGridSize[2] << std::endl;
std::cout << "clockRate: "
<< "Clock frequency in kilohertz: "
<< device_properties.clockRate << std::endl;
std::cout << "totalConstMem: "
<< "Constant memory available on device in bytes: "
<< device_properties.totalConstMem << std::endl;
std::cout << "major: "
<< "Major compute capability: "
<< device_properties.major << std::endl;
std::cout << "minor: "
<< "Minor compute capability: "
<< device_properties.minor << std::endl;
std::cout << "textureAlignment: "
<< "Alignment requirement for textures: "
<< device_properties.textureAlignment << std::endl;
std::cout << "deviceOverlap: "
<< "Device can concurrently copy memory and execute a kernel: "
<< device_properties.deviceOverlap << std::endl;
std::cout << "multiProcessorCount: "
<< "Number of multiprocessors on device: "
<< device_properties.multiProcessorCount << std::endl;
std::cout << "kernelExecTimeoutEnable: "
<< "Specified whether there is a run time limit on kernels: "
<< device_properties.kernelExecTimeoutEnabled << std::endl;
std::cout << "integrated: "
<< "Device is integrated as opposed to discrete: "
<< device_properties.integrated << std::endl;
std::cout << "canMapHostMemory: "
<< "Device can map host memory with cudaHostAlloc/cudaHostGetDevicePointer: "
<< device_properties.canMapHostMemory << std::endl;
std::cout << "computeMode: "
<< "Compute mode (See ::cudaComputeMode): "
<< device_properties.computeMode << std::endl;
#define OUTPUT(NAME,DESC) \
std::cout << #NAME << ": " << DESC << " " << device_properties.NAME << std::endl;
OUTPUT(surfaceAlignment,"the alignment requirements for surfaces.")
OUTPUT(concurrentKernels,"is 1 if the device supports executing multiple kernels within the same context simultaneously, or 0 if not. It is not guaranteed that multiple kernels will be resident on the device concurrently so this feature should not be relied upon for correctness")
OUTPUT(ECCEnabled,"is 1 if the device has ECC support turned on, or 0 if not.")
OUTPUT(pciBusID,"the PCI bus identifier of the device")
OUTPUT(pciDeviceID,"the PCI device (sometimes called slot) identifier of the device")
OUTPUT(pciDomainID,"the PCI domain identifier of the device")
OUTPUT(tccDriver,"1 if the device is using a TCC driver or 0 if not")
OUTPUT(asyncEngineCount,"1 when the device can concurrently copy memory between host and device while executing a kernel. It is 2 when the device can concurrently copy memory between host and device in both directions and execute a kernel at the same time. It is 0 if neither of these is supported.")
OUTPUT(unifiedAddressing,"1 if the device shares a unified address space with the host and 0 otherwise")
OUTPUT(memoryClockRate,"the peak memory clock frequency in kilohertz")
OUTPUT(memoryBusWidth,"the memory bus width in bits")
OUTPUT(l2CacheSize,"L2 cache size in bytes")
OUTPUT(maxThreadsPerMultiProcessor,"the number of maximum resident threads per multiprocessor")
}
std::cout << "***************************************"
<< "***************************************" << std::endl;
}
//===========================================================================//
int get_count ()
{
int num_devices = 0;
::cudaGetDeviceCount( &num_devices );
return num_devices;
}
//===========================================================================//
void describe ()
{
for ( int device=0; device < get_count(); ++device )
{
describe( device );
}
}
//===========================================================================//
int main ()
{
describe();
}
//===========================================================================//
|
65991b9179bddd96f69df46f76d68871c2ef8fc1.hip | // !!! This is a file automatically generated by hipify!!!
#include "neutron_gpu_kernel.h"
#include <hip/hip_runtime.h>
#include <hip/hip_cooperative_groups.h>
// The global atomics are supposed to be optimized by compiler (>= CUDA 9)
// https://devblogs.nvidia.com/cuda-pro-tip-optimized-filtering-warp-aggregated-atomics
using namespace cooperative_groups;
__global__
void neutron_gpu_kernel(long n,
int neutronsPerThread,
const ProblemParameters* params,
unsigned long long int* next_absorbed,
float* absorbed,
unsigned long long int* d_r,
unsigned long long int* d_b,
unsigned long long int* d_t,
unsigned long long* seeds,
hiprandState_t* states) {
const long id = blockIdx.x*blockDim.x + threadIdx.x;
hiprand_init(seeds[id], id, 0, states+id);
const float c = params->c;
const float c_c = params->c_c;
const float h = params->h;
unsigned long long int r = 0, b = 0, t = 0;
long cpt = (blockIdx.x*blockDim.x + threadIdx.x)*neutronsPerThread;
for (long i=0; i<neutronsPerThread; i++) {
if (!(cpt < n))
break;
float d = 0.0;
float x = 0.0;
float v;
while (1) {
const float u = hiprand_uniform (states+id);
float L = -(1 / c) * log(u);
x = x + L * cos(d);
if (x < 0) {
r++;
v = NO_VAL;
break;
}
else if (x >= h) {
t++;
v = NO_VAL;
break;
}
else if (hiprand_uniform (states+id) < c_c / c) {
b++;
v = x;
break;
}
else {
const float u = hiprand_uniform (states+id);
d = u * M_PI;
}
}
if (v != NO_VAL) {
auto g = coalesced_threads();
unsigned long long int pos;
if (g.thread_rank() == 0)
pos = atomicAdd(next_absorbed, g.size());
absorbed[g.shfl(pos, 0) + g.thread_rank()] = v;
}
cpt++;
}
atomicAdd(d_r, r);
atomicAdd(d_b, b);
atomicAdd(d_t, t);
}
| 65991b9179bddd96f69df46f76d68871c2ef8fc1.cu | #include "neutron_gpu_kernel.h"
#include <cuda.h>
#include <cooperative_groups.h>
// The global atomics are supposed to be optimized by compiler (>= CUDA 9)
// https://devblogs.nvidia.com/cuda-pro-tip-optimized-filtering-warp-aggregated-atomics
using namespace cooperative_groups;
__global__
void neutron_gpu_kernel(long n,
int neutronsPerThread,
const ProblemParameters* params,
unsigned long long int* next_absorbed,
float* absorbed,
unsigned long long int* d_r,
unsigned long long int* d_b,
unsigned long long int* d_t,
unsigned long long* seeds,
curandState* states) {
const long id = blockIdx.x*blockDim.x + threadIdx.x;
curand_init(seeds[id], id, 0, states+id);
const float c = params->c;
const float c_c = params->c_c;
const float h = params->h;
unsigned long long int r = 0, b = 0, t = 0;
long cpt = (blockIdx.x*blockDim.x + threadIdx.x)*neutronsPerThread;
for (long i=0; i<neutronsPerThread; i++) {
if (!(cpt < n))
break;
float d = 0.0;
float x = 0.0;
float v;
while (1) {
const float u = curand_uniform (states+id);
float L = -(1 / c) * log(u);
x = x + L * cos(d);
if (x < 0) {
r++;
v = NO_VAL;
break;
}
else if (x >= h) {
t++;
v = NO_VAL;
break;
}
else if (curand_uniform (states+id) < c_c / c) {
b++;
v = x;
break;
}
else {
const float u = curand_uniform (states+id);
d = u * M_PI;
}
}
if (v != NO_VAL) {
auto g = coalesced_threads();
unsigned long long int pos;
if (g.thread_rank() == 0)
pos = atomicAdd(next_absorbed, g.size());
absorbed[g.shfl(pos, 0) + g.thread_rank()] = v;
}
cpt++;
}
atomicAdd(d_r, r);
atomicAdd(d_b, b);
atomicAdd(d_t, t);
}
|
2484d60d06e7d520940f2c6cf3500f58e0700f31.hip | // !!! This is a file automatically generated by hipify!!!
// //////////////////////////////////////////////////////////
// toojpeg.cpp
// written by Stephan Brumme, 2018-2019
// see https://create.stephan-brumme.com/toojpeg/
//
#include "toojpeg_modified.h"
// - the "official" specifications: https://www.w3.org/Graphics/JPEG/itu-t81.pdf and https://www.w3.org/Graphics/JPEG/jfif3.pdf
// - Wikipedia has a short description of the JFIF/JPEG file format: https://en.wikipedia.org/wiki/JPEG_File_Interchange_Format
// - the popular STB Image library includes Jon's JPEG encoder as well: https://github.com/nothings/stb/blob/master/stb_image_write.h
// - the most readable JPEG book (from a developer's perspective) is Miano's "Compressed Image File Formats" (1999, ISBN 0-201-60443-4),
// used copies are really cheap nowadays and include a CD with C++ sources as well (plus great format descriptions of GIF & PNG)
// - much more detailed is Mitchell/Pennebaker's "JPEG: Still Image Data Compression Standard" (1993, ISBN 0-442-01272-1)
// which contains the official JPEG standard, too - fun fact: I bought a signed copy in a second-hand store without noticing
namespace // anonymous namespace to hide local functions / constants / etc.
{
// ////////////////////////////////////////
// data types
using uint8_t = unsigned char;
using uint16_t = unsigned short;
using int16_t = short;
using int32_t = int; // at least four bytes
// ////////////////////////////////////////
// constants
// quantization tables from JPEG Standard, Annex K
const uint8_t DefaultQuantLuminance[8*8] =
{ 16, 11, 10, 16, 24, 40, 51, 61, // there are a few experts proposing slightly more efficient values,
12, 12, 14, 19, 26, 58, 60, 55, // e.g. https://www.imagemagick.org/discourse-server/viewtopic.php?t=20333
14, 13, 16, 24, 40, 57, 69, 56, // btw: Google's Guetzli project optimizes the quantization tables per image
14, 17, 22, 29, 51, 87, 80, 62,
18, 22, 37, 56, 68,109,103, 77,
24, 35, 55, 64, 81,104,113, 92,
49, 64, 78, 87,103,121,120,101,
72, 92, 95, 98,112,100,103, 99 };
const uint8_t DefaultQuantChrominance[8*8] =
{ 17, 18, 24, 47, 99, 99, 99, 99,
18, 21, 26, 66, 99, 99, 99, 99,
24, 26, 56, 99, 99, 99, 99, 99,
47, 66, 99, 99, 99, 99, 99, 99,
99, 99, 99, 99, 99, 99, 99, 99,
99, 99, 99, 99, 99, 99, 99, 99,
99, 99, 99, 99, 99, 99, 99, 99,
99, 99, 99, 99, 99, 99, 99, 99 };
const float dct_matrix[8][8] = {
{ 0.353553, 0.353553, 0.353553, 0.353553, 0.353553, 0.353553, 0.353553,0.353553},
{ 0.490393, 0.415735, 0.277785, 0.0975452, -0.0975452, -0.277785, -0.415735,-0.490393},
{ 0.46194, 0.191342, -0.191342, -0.46194, -0.46194, -0.191342, 0.191342,0.46194},
{ 0.415735, -0.0975452, -0.490393, -0.277785, 0.277785, 0.490393, 0.0975453,-0.415735},
{ 0.353553, -0.353553, -0.353553, 0.353553, 0.353553, -0.353553, -0.353553,0.353553},
{ 0.277785, -0.490393, 0.0975452, 0.415735, -0.415735, -0.0975451, 0.490393,-0.277785},
{ 0.191342, -0.46194, 0.46194, -0.191342, -0.191342, 0.46194, -0.46194,0.191342},
{ 0.0975452, -0.277785, 0.415735, -0.490393, 0.490393, -0.415735, 0.277785,-0.0975448}
};
const float dct_matrix_transpose[8][8] = {
{ 0.353553, 0.490393, 0.46194, 0.415735, 0.353553, 0.277785, 0.191342,0.0975452},
{ 0.353553, 0.415735, 0.191342, -0.0975452, -0.353553, -0.490393, -0.46194,-0.277785},
{ 0.353553, 0.277785, -0.191342, -0.490393, -0.353553, 0.0975452, 0.46194,0.415735},
{ 0.353553, 0.0975452, -0.46194, -0.277785, 0.353553, 0.415735, -0.191342,-0.490393},
{ 0.353553, -0.0975452, -0.46194, 0.277785, 0.353553, -0.415735, -0.191342,0.490393},
{ 0.353553, -0.277785, -0.191342, 0.490393, -0.353553, -0.0975451, 0.46194,-0.415735},
{ 0.353553, -0.415735, 0.191342, 0.0975453, -0.353553, 0.490393, -0.46194,0.277785},
{ 0.353553, -0.490393, 0.46194, -0.415735, 0.353553, -0.277785, 0.191342,-0.0975448}
};
const float dct_correction_matrix[8][8] = { // combine with the other scale matrix
{8.00000, 11.09631, 7.52311, 9.40692, 6.19024, 6.28556, 2.81439, 2.20719},
{11.09631, 1, 1, 1, 1, 1, 1, 1},
{9.05127, 1, 1, 1, 1, 1, 1, 1},
{9.40699, 1, 1, 1, 1, 1, 1, 1},
{4.14146, 1, 1, 1, 1, 1, 1, 1},
{6.28555, 1, 1, 1, 1, 1, 1, 1},
{3.48541, 1, 1, 1, 1, 1, 1, 1},
{2.20719, 1, 1, 1, 1, 1, 1, 1}
};
// 8x8 blocks are processed in zig-zag order
// most encoders use a zig-zag "forward" table, I switched to its inverse for performance reasons
// note: ZigZagInv[ZigZag[i]] = i
const uint8_t ZigZagInv[8*8] =
{ 0, 1, 8,16, 9, 2, 3,10, // ZigZag[] = 0, 1, 5, 6,14,15,27,28,
17,24,32,25,18,11, 4, 5, // 2, 4, 7,13,16,26,29,42,
12,19,26,33,40,48,41,34, // 3, 8,12,17,25,30,41,43,
27,20,13, 6, 7,14,21,28, // 9,11,18,24,31,40,44,53,
35,42,49,56,57,50,43,36, // 10,19,23,32,39,45,52,54,
29,22,15,23,30,37,44,51, // 20,22,33,38,46,51,55,60,
58,59,52,45,38,31,39,46, // 21,34,37,47,50,56,59,61,
53,60,61,54,47,55,62,63 }; // 35,36,48,49,57,58,62,63
// static Huffman code tables from JPEG standard Annex K
// - CodesPerBitsize tables define how many Huffman codes will have a certain bitsize (plus 1 because there is nothing with zero bits),
// e.g. DcLuminanceCodesPerBitsize[2] = 5 because there are 5 Huffman codes being 2+1=3 bits long
// - Values tables are a list of values ordered by their Huffman code bitsize,
// e.g. AcLuminanceValues => Huffman(0x01,0x02 and 0x03) will have 2 bits, Huffman(0x00) will have 3 bits, Huffman(0x04,0x11 and 0x05) will have 4 bits, ...
// Huffman definitions for first DC/AC tables (luminance / Y channel)
const uint8_t DcLuminanceCodesPerBitsize[16] = { 0,1,5,1,1,1,1,1,1,0,0,0,0,0,0,0 }; // sum = 12
const uint8_t DcLuminanceValues [12] = { 0,1,2,3,4,5,6,7,8,9,10,11 }; // => 12 codes
const uint8_t AcLuminanceCodesPerBitsize[16] = { 0,2,1,3,3,2,4,3,5,5,4,4,0,0,1,125 }; // sum = 162
const uint8_t AcLuminanceValues [162] = // => 162 codes
{ 0x01,0x02,0x03,0x00,0x04,0x11,0x05,0x12,0x21,0x31,0x41,0x06,0x13,0x51,0x61,0x07,0x22,0x71,0x14,0x32,0x81,0x91,0xA1,0x08, // 16*10+2 symbols because
0x23,0x42,0xB1,0xC1,0x15,0x52,0xD1,0xF0,0x24,0x33,0x62,0x72,0x82,0x09,0x0A,0x16,0x17,0x18,0x19,0x1A,0x25,0x26,0x27,0x28, // upper 4 bits can be 0..F
0x29,0x2A,0x34,0x35,0x36,0x37,0x38,0x39,0x3A,0x43,0x44,0x45,0x46,0x47,0x48,0x49,0x4A,0x53,0x54,0x55,0x56,0x57,0x58,0x59, // while lower 4 bits can be 1..A
0x5A,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6A,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7A,0x83,0x84,0x85,0x86,0x87,0x88,0x89, // plus two special codes 0x00 and 0xF0
0x8A,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9A,0xA2,0xA3,0xA4,0xA5,0xA6,0xA7,0xA8,0xA9,0xAA,0xB2,0xB3,0xB4,0xB5,0xB6, // order of these symbols was determined empirically by JPEG committee
0xB7,0xB8,0xB9,0xBA,0xC2,0xC3,0xC4,0xC5,0xC6,0xC7,0xC8,0xC9,0xCA,0xD2,0xD3,0xD4,0xD5,0xD6,0xD7,0xD8,0xD9,0xDA,0xE1,0xE2,
0xE3,0xE4,0xE5,0xE6,0xE7,0xE8,0xE9,0xEA,0xF1,0xF2,0xF3,0xF4,0xF5,0xF6,0xF7,0xF8,0xF9,0xFA };
// Huffman definitions for second DC/AC tables (chrominance / Cb and Cr channels)
const uint8_t DcChrominanceCodesPerBitsize[16] = { 0,3,1,1,1,1,1,1,1,1,1,0,0,0,0,0 }; // sum = 12
const uint8_t DcChrominanceValues [12] = { 0,1,2,3,4,5,6,7,8,9,10,11 }; // => 12 codes (identical to DcLuminanceValues)
const uint8_t AcChrominanceCodesPerBitsize[16] = { 0,2,1,2,4,4,3,4,7,5,4,4,0,1,2,119 }; // sum = 162
const uint8_t AcChrominanceValues [162] = // => 162 codes
{ 0x00,0x01,0x02,0x03,0x11,0x04,0x05,0x21,0x31,0x06,0x12,0x41,0x51,0x07,0x61,0x71,0x13,0x22,0x32,0x81,0x08,0x14,0x42,0x91, // same number of symbol, just different order
0xA1,0xB1,0xC1,0x09,0x23,0x33,0x52,0xF0,0x15,0x62,0x72,0xD1,0x0A,0x16,0x24,0x34,0xE1,0x25,0xF1,0x17,0x18,0x19,0x1A,0x26, // (which is more efficient for AC coding)
0x27,0x28,0x29,0x2A,0x35,0x36,0x37,0x38,0x39,0x3A,0x43,0x44,0x45,0x46,0x47,0x48,0x49,0x4A,0x53,0x54,0x55,0x56,0x57,0x58,
0x59,0x5A,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6A,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7A,0x82,0x83,0x84,0x85,0x86,0x87,
0x88,0x89,0x8A,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9A,0xA2,0xA3,0xA4,0xA5,0xA6,0xA7,0xA8,0xA9,0xAA,0xB2,0xB3,0xB4,
0xB5,0xB6,0xB7,0xB8,0xB9,0xBA,0xC2,0xC3,0xC4,0xC5,0xC6,0xC7,0xC8,0xC9,0xCA,0xD2,0xD3,0xD4,0xD5,0xD6,0xD7,0xD8,0xD9,0xDA,
0xE2,0xE3,0xE4,0xE5,0xE6,0xE7,0xE8,0xE9,0xEA,0xF2,0xF3,0xF4,0xF5,0xF6,0xF7,0xF8,0xF9,0xFA };
const int16_t CodeWordLimit = 2048; // +/-2^11, maximum value after DCT
// ////////////////////////////////////////
// structs
// represent a single Huffman code
struct BitCode
{
BitCode() = default; // undefined state, must be initialized at a later time
BitCode(uint16_t code_, uint8_t numBits_)
: code(code_), numBits(numBits_) {}
uint16_t code; // JPEG's Huffman codes are limited to 16 bits
uint8_t numBits; // number of valid bits
};
// wrapper for bit output operations
struct BitWriter
{
// user-supplied callback that writes/stores one byte
TooJpeg::WRITE_ONE_BYTE output;
// initialize writer
explicit BitWriter(TooJpeg::WRITE_ONE_BYTE output_) : output(output_) {}
// store the most recently encoded bits that are not written yet
struct BitBuffer
{
int32_t data = 0; // actually only at most 24 bits are used
uint8_t numBits = 0; // number of valid bits (the right-most bits)
} buffer;
// write Huffman bits stored in BitCode, keep excess bits in BitBuffer
BitWriter& operator<<(const BitCode& data)
{
// append the new bits to those bits leftover from previous call(s)
buffer.numBits += data.numBits;
buffer.data <<= data.numBits;
buffer.data |= data.code;
// write all "full" bytes
while (buffer.numBits >= 8)
{
// extract highest 8 bits
buffer.numBits -= 8;
auto oneByte = uint8_t(buffer.data >> buffer.numBits);
output(oneByte);
if (oneByte == 0xFF) // 0xFF has a special meaning for JPEGs (it's a block marker)
output(0); // therefore pad a zero to indicate "nope, this one ain't a marker, it's just a coincidence"
// note: I don't clear those written bits, therefore buffer.bits may contain garbage in the high bits
// if you really want to "clean up" (e.g. for debugging purposes) then uncomment the following line
//buffer.bits &= (1 << buffer.numBits) - 1;
}
return *this;
}
// write all non-yet-written bits, fill gaps with 1s (that's a strange JPEG thing)
void flush()
{
// at most seven set bits needed to "fill" the last byte: 0x7F = binary 0111 1111
*this << BitCode(0x7F, 7); // I should set buffer.numBits = 0 but since there are no single bits written after flush() I can safely ignore it
}
// NOTE: all the following BitWriter functions IGNORE the BitBuffer and write straight to output !
// write a single byte
BitWriter& operator<<(uint8_t oneByte)
{
output(oneByte);
return *this;
}
// write an array of bytes
template <typename T, int Size>
BitWriter& operator<<(T (&manyBytes)[Size])
{
for (auto c : manyBytes)
output(c);
return *this;
}
// start a new JFIF block
void addMarker(uint8_t id, uint16_t length)
{
output(0xFF); output(id); // ID, always preceded by 0xFF
output(uint8_t(length >> 8)); // length of the block (big-endian, includes the 2 length bytes as well)
output(uint8_t(length & 0xFF));
}
};
// ////////////////////////////////////////
// functions / templates
// same as ::min()
template <typename Number>
Number minimum(Number value, Number maximum)
{
return value <= maximum ? value : maximum;
}
// restrict a value to the interval [minimum, maximum]
template <typename Number, typename Limit>
Number clamp(Number value, Limit minValue, Limit maxValue)
{
if (value <= minValue) return minValue; // never smaller than the minimum
if (value >= maxValue) return maxValue; // never bigger than the maximum
return value; // value was inside interval, keep it
}
// convert from RGB to YCbCr, constants are similar to ITU-R, see https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion
float rgb2y (float r, float g, float b) { return +0.299f * r +0.587f * g +0.114f * b; }
float rgb2cb(float r, float g, float b) { return -0.16874f * r -0.33126f * g +0.5f * b; }
float rgb2cr(float r, float g, float b) { return +0.5f * r -0.41869f * g -0.08131f * b; }
/*
placeholder for GPU accelerated version
*/
// returns the position of the last non-zero quantized coefficient (needed later by writeBlock)
int transformBlock(float block[8][8], const float scaled[8*8], int16_t quantized[8*8])
{
/*
STEP 1: DCT
Parallelizability: strong (matmul)
Status: done, needs implementing
*/
/* gpu accelerated 8x8 DCT function */
DCT_8x8(block);
// "linearize" the 8x8 block, treat it as a flat array of 64 floats
auto block64 = (float*) block;
/*
Step 2: Scale
Parallelizability: Strong (for loop)
Status: not done
*/
for (auto i = 0; i < 8*8; i++)
block64[i] *= scaled[i];
// quantize and zigzag the other 63 coefficients
auto posNonZero = 0; // find last coefficient which is not zero (because trailing zeros are encoded differently)
/*
Step 3: Quantization
Parallelizability: Strong (double for loop)
Status: not done
*/
for (auto i = 0; i < 8*8; i++)
{
auto value = block64[ZigZagInv[i]];
// round to nearest integer
quantized[i] = int(value + (value >= 0 ? +0.5f : -0.5f)); // C++11's nearbyint() achieves a similar effect
// remember offset of last non-zero coefficient
if (quantized[i] != 0)
posNonZero = i;
}
return posNonZero;
}
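// --- Illustrative sketch (not part of the original file) -------------------
// DCT_8x8() above is assumed to come from toojpeg_modified.h as the GPU routine.
// For reference only, a 2-D 8x8 DCT can be written as dct_matrix * block *
// dct_matrix_transpose using the tables defined earlier in this file; the
// host-side helper below just illustrates that formulation (note that the
// scaled[] tables used by transformBlock assume AAN-style scaling, so this is
// not a drop-in replacement for the author's GPU kernel).
void dct8x8_reference_sketch(float block[8][8])
{
float tmp[8][8];
// tmp = dct_matrix * block
for (auto r = 0; r < 8; r++)
for (auto c = 0; c < 8; c++)
{
float sum = 0;
for (auto k = 0; k < 8; k++)
sum += dct_matrix[r][k] * block[k][c];
tmp[r][c] = sum;
}
// block = tmp * dct_matrix_transpose
for (auto r = 0; r < 8; r++)
for (auto c = 0; c < 8; c++)
{
float sum = 0;
for (auto k = 0; k < 8; k++)
sum += tmp[r][k] * dct_matrix_transpose[k][c];
block[r][c] = sum;
}
}
// ---------------------------------------------------------------------------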
/*
data is an array of n 8*8 blocks
scale is an array of 8*8
posNonZero will store the position of the last non-zero value for each of the n blocks
n is the number of blocks
*/
void transformBlock_many(float* const data, const float* const scale, float* const posNonZero, const uint32_t n)
{
// DCT
// Scale (remove scale step from DCT and combine the scale matrix there with the one here so it's only 1 step instead of 2)
// quantize (process many blocks at a time, with parallelism inside each block too)
// find pos non zero (parallel across blocks, but serial inside each block)
// start counting from back and stop at first non-zero value, can skip most of the block then
}
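// --- Illustrative sketch (not part of the original file) -------------------
// A minimal kernel for the "scale + quantize" steps listed above, processing
// one 8x8 block per thread block with 64 threads. All names here are
// hypothetical; ZigZagInv would have to be passed in (or copied to constant
// memory), and the DCT / posNonZero steps are left out, matching the plan in
// the comments. In practice this would live in the GPU translation unit next
// to DCT_8x8 rather than in this anonymous namespace.
__global__ void quantizeBlocksKernel(const float* __restrict__ data, // n DCT'd 8x8 blocks
const float* __restrict__ scale, // 8x8 scale table
int16_t* __restrict__ quantized, // n 8x8 outputs, zig-zag order
const uint8_t* __restrict__ zigZagInv, // device copy of ZigZagInv
uint32_t numBlocks)
{
const uint32_t block = blockIdx.x; // one 8x8 block per thread block
const uint32_t i = threadIdx.x; // 0..63, launch with 64 threads
if (block >= numBlocks || i >= 64)
return;
const float* block64 = data + block * 64;
const float value = block64[zigZagInv[i]] * scale[zigZagInv[i]];
quantized[block * 64 + i] = int16_t(value + (value >= 0 ? +0.5f : -0.5f));
}
// ---------------------------------------------------------------------------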
/*
data is (width * height) * 3
data[i] = r, data[i + 1] = g, data[i + 2] = b
Y is stored as (width/8 * height/8) * (8x8 Y block)
etc for Cb, Cr
*/
void convertRGBtoYCbCr444(uint8_t* data, const int width, const int height, float* Y, float* Cb, float* Cr)
{
// Y = rgb2Y(data)
// Y = Y - 128.f, probably in the same kernel so we dont need a deviceSynchronize
// Cb = rgb2Cb(data)
// Cr = rgb2Cr(data)
// hipDeviceSynchronize
}
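// --- Illustrative sketch (not part of the original file) -------------------
// One thread per pixel for the 4:4:4 plan above. The kernel name and the flat
// (non 8x8-tiled) output layout are assumptions; the comment above calls for
// tiled 8x8 storage, which would only change the output index computation. The
// constants are the same ones used by rgb2y/rgb2cb/rgb2cr, with the JPEG level
// shift (-128) folded into Y as suggested above.
__global__ void rgbToYCbCr444Kernel(const uint8_t* __restrict__ rgb, // interleaved RGB, 3 bytes/pixel
float* __restrict__ Y,
float* __restrict__ Cb,
float* __restrict__ Cr,
int numPixels)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= numPixels)
return;
const float r = rgb[3 * i ];
const float g = rgb[3 * i + 1];
const float b = rgb[3 * i + 2];
Y [i] = 0.299f * r + 0.587f * g + 0.114f * b - 128.f; // rgb2y() - 128
Cb[i] = -0.16874f * r - 0.33126f * g + 0.5f * b; // rgb2cb()
Cr[i] = 0.5f * r - 0.41869f * g - 0.08131f * b; // rgb2cr()
}
// ---------------------------------------------------------------------------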
/*
Y is stored as (width/8 * height/8) * (8x8 Y block)
Cb/Cr is stored as (width/16 * height/16) * (1 Cb 8x8 block / 1 Cr 8x8 block)
*/
void convertRGBtoYCbCr420(uint8_t* data, const int width, const int height, float* Y, float* Cb, float* Cr)
{
// Y = rgb2Y(data)
// Y = Y - 128.f, probably in the same kernel so we dont need a deviceSynchronize
// downscale RGB to 1/4 size with averages
// wait for the downscale kernel to finish
// Cb = rgb2Cb(data)
// Cr = rgb2Cr(data)
// hipDeviceSynchronize
}
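// --- Illustrative sketch (not part of the original file) -------------------
// 2x2 RGB averaging for the 4:2:0 chroma path planned above, one thread per
// downsampled pixel. The kernel name and the packed float output layout are
// assumptions; the border is replicated like in the CPU code further below.
__global__ void downsampleRgb2x2Kernel(const uint8_t* __restrict__ rgb, // interleaved RGB, 3 bytes/pixel
float* __restrict__ rgbSmall, // 3 floats per downsampled pixel
int width, int height)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x; // coordinates in the half-size image
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (2 * x >= width || 2 * y >= height)
return;
const int x0 = 2 * x, x1 = min(x0 + 1, width - 1); // replicate the last column/row
const int y0 = 2 * y, y1 = min(y0 + 1, height - 1);
const int outIdx = y * ((width + 1) / 2) + x;
for (int c = 0; c < 3; c++)
{
const float sum = rgb[(y0 * width + x0) * 3 + c] + rgb[(y0 * width + x1) * 3 + c]
+ rgb[(y1 * width + x0) * 3 + c] + rgb[(y1 * width + x1) * 3 + c];
rgbSmall[3 * outIdx + c] = sum / 4.f;
}
}
// ---------------------------------------------------------------------------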
/*
data is n * (width * height),
Y is returned in data
*/
void convertBWtoY(uint8_t* data, const int width, const int height)
{
// Y = pixel - 128.f but in CUDA
}
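// --- Illustrative sketch (not part of the original file) -------------------
// For the grayscale path the level shift is the whole job, so a one-line kernel
// is enough; the kernel name and the separate float output (instead of writing
// back into data) are assumptions.
__global__ void bwToYKernel(const uint8_t* __restrict__ pixels, float* __restrict__ Y, int numPixels)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < numPixels)
Y[i] = pixels[i] - 128.f;
}
// ---------------------------------------------------------------------------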
/*
writes and huffman encodes the block
*/
int16_t writeBlock(BitWriter& writer, float block[8][8], const int16_t quantized[8*8], int16_t lastDC,
const BitCode huffmanDC[256], const BitCode huffmanAC[256], const BitCode* codewords, int posNonZero)
{
auto block64 = (float*) block;
/*
Step 5: Begin Huffman encoding
Parallelizability: none, each block depends on the previous one
Status: not done
*/
// same "average color" as previous block ?
auto DC = int(block64[0] + (block64[0] >= 0 ? +0.5f : -0.5f));
auto diff = DC - lastDC;
if (diff == 0)
writer << huffmanDC[0x00]; // yes, write a special short symbol
else
{
auto bits = codewords[diff]; // nope, encode the difference to previous block's average color
writer << huffmanDC[bits.numBits] << bits;
}
/*
Step 6: Write the Huffman-encoded bits
Parallelizability: none (file I/O, bytes must be written in order)
Status: not done
*/
// encode ACs (quantized[1..63])
auto offset = 0; // upper 4 bits count the number of consecutive zeros
for (auto i = 1; i <= posNonZero; i++) // quantized[0] was already written, skip all trailing zeros, too
{
// zeros are encoded in a special way
while (quantized[i] == 0) // found another zero ?
{
offset += 0x10; // add 1 to the upper 4 bits
// split into blocks of at most 16 consecutive zeros
if (offset > 0xF0) // remember, the counter is in the upper 4 bits, 0xF = 15
{
writer << huffmanAC[0xF0]; // 0xF0 is a special code for "16 zeros"
offset = 0;
}
i++;
}
auto encoded = codewords[quantized[i]];
// combine number of zeros with the number of bits of the next non-zero value
writer << huffmanAC[offset + encoded.numBits] << encoded; // and the value itself
offset = 0;
}
// send end-of-block code (0x00), only needed if there are trailing zeros
if (posNonZero < 8*8 - 1) // = 63
writer << huffmanAC[0x00];
return DC;
}
void writeBlock_many(BitWriter& writer, float* const data, const uint32_t n, const BitCode huffmanDC[256], const BitCode huffmanAC[256], const BitCode* codewords)
{
// for block in data
// compare to DC of last block
// encode non-zeros in block
// encode zeros in block
}
// Jon's code includes the pre-generated Huffman codes
// I don't like these "magic constants" and compute them on my own :-)
void generateHuffmanTable(const uint8_t numCodes[16], const uint8_t* values, BitCode result[256])
{
// process all bitsizes 1 thru 16, no JPEG Huffman code is allowed to exceed 16 bits
auto huffmanCode = 0;
for (auto numBits = 1; numBits <= 16; numBits++)
{
// ... and each code of these bitsizes
for (auto i = 0; i < numCodes[numBits - 1]; i++) // note: numCodes array starts at zero, but smallest bitsize is 1
result[*values++] = BitCode(huffmanCode++, numBits);
// next Huffman code needs to be one bit wider
huffmanCode <<= 1;
}
}
} // end of anonymous namespace
// -------------------- externally visible code --------------------
namespace TooJpeg
{
// the only exported function ...
bool writeJpeg(WRITE_ONE_BYTE output, const void* pixels_, unsigned short width, unsigned short height,
bool isRGB, unsigned char quality_, bool downsample, const char* comment)
{
// reject invalid pointers
if (output == nullptr || pixels_ == nullptr)
return false;
// check image format
if (width == 0 || height == 0)
return false;
// number of components
const auto numComponents = isRGB ? 3 : 1;
// note: if there is just one component (=grayscale), then only luminance needs to be stored in the file
// thus everything related to chrominance need not to be written to the JPEG
// I still compute a few things, like quantization tables to avoid a complete code mess
// grayscale images can't be downsampled (because there are no Cb + Cr channels)
if (!isRGB)
downsample = false;
// wrapper for all output operations
BitWriter bitWriter(output);
// ////////////////////////////////////////
// JFIF headers
const uint8_t HeaderJfif[2+2+16] =
{ 0xFF,0xD8, // SOI marker (start of image)
0xFF,0xE0, // JFIF APP0 tag
0,16, // length: 16 bytes (14 bytes payload + 2 bytes for this length field)
'J','F','I','F',0, // JFIF identifier, zero-terminated
1,1, // JFIF version 1.1
0, // no density units specified
0,1,0,1, // density: 1 pixel "per pixel" horizontally and vertically
0,0 }; // no thumbnail (size 0 x 0)
bitWriter << HeaderJfif;
// ////////////////////////////////////////
// comment (optional)
if (comment != nullptr)
{
// look for zero terminator
auto length = 0; // = strlen(comment);
while (comment[length] != 0)
length++;
// write COM marker
bitWriter.addMarker(0xFE, 2+length); // block size is number of bytes (without zero terminator) + 2 bytes for this length field
// ... and write the comment itself
for (auto i = 0; i < length; i++)
bitWriter << comment[i];
}
// ////////////////////////////////////////
// adjust quantization tables to desired quality
// quality level must be in 1 ... 100
auto quality = clamp<uint16_t>(quality_, 1, 100);
// convert to an internal JPEG quality factor, formula taken from libjpeg
quality = quality < 50 ? 5000 / quality : 200 - quality * 2;
/* Probably not worth paralellizing this step since it's only 64 loops */
uint8_t quantLuminance [8*8];
uint8_t quantChrominance[8*8];
for (auto i = 0; i < 8*8; i++)
{
int luminance = (DefaultQuantLuminance [ZigZagInv[i]] * quality + 50) / 100;
int chrominance = (DefaultQuantChrominance[ZigZagInv[i]] * quality + 50) / 100;
// clamp to 1..255
quantLuminance [i] = clamp(luminance, 1, 255);
quantChrominance[i] = clamp(chrominance, 1, 255);
}
// write quantization tables
bitWriter.addMarker(0xDB, 2 + (isRGB ? 2 : 1) * (1 + 8*8)); // length: 65 bytes per table + 2 bytes for this length field
// each table has 64 entries and is preceded by an ID byte
bitWriter << 0x00 << quantLuminance; // first quantization table
if (isRGB)
bitWriter << 0x01 << quantChrominance; // second quantization table, only relevant for color images
// ////////////////////////////////////////
// write image infos (SOF0 - start of frame)
bitWriter.addMarker(0xC0, 2+6+3*numComponents); // length: 6 bytes general info + 3 per channel + 2 bytes for this length field
// 8 bits per channel
bitWriter << 0x08
// image dimensions (big-endian)
<< (height >> 8) << (height & 0xFF)
<< (width >> 8) << (width & 0xFF);
// sampling and quantization tables for each component
bitWriter << numComponents; // 1 component (grayscale, Y only) or 3 components (Y,Cb,Cr)
for (auto id = 1; id <= numComponents; id++)
bitWriter << id // component ID (Y=1, Cb=2, Cr=3)
// bitmasks for sampling: highest 4 bits: horizontal, lowest 4 bits: vertical
<< (id == 1 && downsample ? 0x22 : 0x11) // 0x11 is default YCbCr 4:4:4 and 0x22 stands for YCbCr 4:2:0
<< (id == 1 ? 0 : 1); // use quantization table 0 for Y, table 1 for Cb and Cr
// ////////////////////////////////////////
// Huffman tables
// DHT marker - define Huffman tables
bitWriter.addMarker(0xC4, isRGB ? (2+208+208) : (2+208));
// 2 bytes for the length field, store chrominance only if needed
// 1+16+12 for the DC luminance
// 1+16+162 for the AC luminance (208 = 1+16+12 + 1+16+162)
// 1+16+12 for the DC chrominance
// 1+16+162 for the AC chrominance (208 = 1+16+12 + 1+16+162, same as above)
// store luminance's DC+AC Huffman table definitions
bitWriter << 0x00 // highest 4 bits: 0 => DC, lowest 4 bits: 0 => Y (baseline)
<< DcLuminanceCodesPerBitsize
<< DcLuminanceValues;
bitWriter << 0x10 // highest 4 bits: 1 => AC, lowest 4 bits: 0 => Y (baseline)
<< AcLuminanceCodesPerBitsize
<< AcLuminanceValues;
// compute actual Huffman code tables (see Jon's code for precalculated tables)
BitCode huffmanLuminanceDC[256];
BitCode huffmanLuminanceAC[256];
generateHuffmanTable(DcLuminanceCodesPerBitsize, DcLuminanceValues, huffmanLuminanceDC);
generateHuffmanTable(AcLuminanceCodesPerBitsize, AcLuminanceValues, huffmanLuminanceAC);
// chrominance is only relevant for color images
BitCode huffmanChrominanceDC[256];
BitCode huffmanChrominanceAC[256];
if (isRGB)
{
// store luminance's DC+AC Huffman table definitions
bitWriter << 0x01 // highest 4 bits: 0 => DC, lowest 4 bits: 1 => Cr,Cb (baseline)
<< DcChrominanceCodesPerBitsize
<< DcChrominanceValues;
bitWriter << 0x11 // highest 4 bits: 1 => AC, lowest 4 bits: 1 => Cr,Cb (baseline)
<< AcChrominanceCodesPerBitsize
<< AcChrominanceValues;
// compute actual Huffman code tables (see Jon's code for precalculated tables)
generateHuffmanTable(DcChrominanceCodesPerBitsize, DcChrominanceValues, huffmanChrominanceDC);
generateHuffmanTable(AcChrominanceCodesPerBitsize, AcChrominanceValues, huffmanChrominanceAC);
}
// ////////////////////////////////////////
// start of scan (there is only a single scan for baseline JPEGs)
bitWriter.addMarker(0xDA, 2+1+2*numComponents+3); // 2 bytes for the length field, 1 byte for number of components,
// then 2 bytes for each component and 3 bytes for spectral selection
// assign Huffman tables to each component
bitWriter << numComponents;
for (auto id = 1; id <= numComponents; id++)
// highest 4 bits: DC Huffman table, lowest 4 bits: AC Huffman table
bitWriter << id << (id == 1 ? 0x00 : 0x11); // Y: tables 0 for DC and AC; Cb + Cr: tables 1 for DC and AC
// constant values for our baseline JPEGs (which have a single sequential scan)
static const uint8_t Spectral[3] = { 0, 63, 0 }; // spectral selection: must be from 0 to 63; successive approximation must be 0
bitWriter << Spectral;
// ////////////////////////////////////////
// adjust quantization tables with AAN scaling factors to simplify DCT
float scaledLuminance [8*8];
float scaledChrominance[8*8];
for (auto i = 0; i < 8*8; i++)
{
auto row = ZigZagInv[i] / 8; // same as ZigZagInv[i] >> 3
auto column = ZigZagInv[i] % 8; // same as ZigZagInv[i] & 7
// scaling constants for AAN DCT algorithm: AanScaleFactors[0] = 1, AanScaleFactors[k=1..7] = cos(k*PI/16) * sqrt(2)
static const float AanScaleFactors[8] = { 1, 1.387039845f, 1.306562965f, 1.175875602f, 1, 0.785694958f, 0.541196100f, 0.275899379f };
auto factor = 1 / (AanScaleFactors[row] * AanScaleFactors[column] * 8);
scaledLuminance [ZigZagInv[i]] = factor / quantLuminance [i];
scaledChrominance[ZigZagInv[i]] = factor / quantChrominance[i];
// if you really want JPEGs that are bitwise identical to Jon Olick's code then you need slightly different formulas (note: sqrt(8) = 2.828427125f)
//static const float aasf[] = { 1.0f * 2.828427125f, 1.387039845f * 2.828427125f, 1.306562965f * 2.828427125f, 1.175875602f * 2.828427125f, 1.0f * 2.828427125f, 0.785694958f * 2.828427125f, 0.541196100f * 2.828427125f, 0.275899379f * 2.828427125f }; // line 240 of jo_jpeg.cpp
//scaledLuminance [ZigZagInv[i]] = 1 / (quantLuminance [i] * aasf[row] * aasf[column]); // lines 266-267 of jo_jpeg.cpp
//scaledChrominance[ZigZagInv[i]] = 1 / (quantChrominance[i] * aasf[row] * aasf[column]);
}
// ////////////////////////////////////////
// precompute JPEG codewords for quantized DCT
BitCode codewordsArray[2 * CodeWordLimit]; // note: quantized[i] is found at codewordsArray[quantized[i] + CodeWordLimit]
BitCode* codewords = &codewordsArray[CodeWordLimit]; // allow negative indices, so quantized[i] is at codewords[quantized[i]]
uint8_t numBits = 1; // each codeword has at least one bit (value == 0 is undefined)
int32_t mask = 1; // mask is always 2^numBits - 1, initial value 2^1-1 = 2-1 = 1
for (int16_t value = 1; value < CodeWordLimit; value++)
{
// numBits = position of highest set bit (ignoring the sign)
// mask = (2^numBits) - 1
if (value > mask) // one more bit ?
{
numBits++;
mask = (mask << 1) | 1; // append a set bit
}
codewords[-value] = BitCode(mask - value, numBits); // note that I use a negative index => codewords[-value] = codewordsArray[CodeWordLimit - value]
codewords[+value] = BitCode( value, numBits);
}
// just convert image data from void*
auto pixels = (const uint8_t*)pixels_;
// the next two variables are frequently used when checking for image borders
const auto maxWidth = width - 1; // "last row"
const auto maxHeight = height - 1; // "bottom line"
// process MCUs (minimum codes units) => image is subdivided into a grid of 8x8 or 16x16 tiles
const auto sampling = downsample ? 2 : 1; // 1x1 or 2x2 sampling
const auto mcuSize = 8 * sampling;
/*
steps taken in the loop:
Step 1: convert rgb into YCbCr
Parallelizability: strong (loops)
Step 2: Encode Y
Parallelizability: medium
if we break it up into DCT, scaling, then writing,
we can DCT and scale all the Y blocks on the GPU, then
finish the writing on the CPU
Step 3: Perform Downsampling (if applicable)
Parallelizability: strong (loops)
can move up into first step of converting rgb to YCbCr
Step 4: Encode Cb and Cr
Parallelizability: medium (see step 3)
*/
// average color of the previous MCU
int16_t lastYDC = 0, lastCbDC = 0, lastCrDC = 0;
// convert from RGB to YCbCr
float Y[8][8], Cb[8][8], Cr[8][8];
for (auto mcuY = 0; mcuY < height; mcuY += mcuSize) // each step is either 8 or 16 (=mcuSize)
for (auto mcuX = 0; mcuX < width; mcuX += mcuSize)
{
// YCbCr 4:4:4 format: each MCU is a 8x8 block - the same applies to grayscale images, too
// YCbCr 4:2:0 format: each MCU represents a 16x16 block, stored as 4x 8x8 Y-blocks plus 1x 8x8 Cb and 1x 8x8 Cr block)
for (auto blockY = 0; blockY < mcuSize; blockY += 8) // iterate once (YCbCr444 and grayscale) or twice (YCbCr420)
for (auto blockX = 0; blockX < mcuSize; blockX += 8)
{
// now we finally have an 8x8 block ...
for (auto deltaY = 0; deltaY < 8; deltaY++)
{
auto column = minimum(mcuX + blockX , maxWidth); // must not exceed image borders, replicate last row/column if needed
auto row = minimum(mcuY + blockY + deltaY, maxHeight);
for (auto deltaX = 0; deltaX < 8; deltaX++)
{
// find actual pixel position within the current image
auto pixelPos = row * int(width) + column; // the cast ensures that we don't run into multiplication overflows
if (column < maxWidth)
column++;
// grayscale images have solely a Y channel which can be easily derived from the input pixel by shifting it by 128
if (!isRGB)
{
Y[deltaY][deltaX] = pixels[pixelPos] - 128.f;
continue;
}
// RGB: 3 bytes per pixel (whereas grayscale images have only 1 byte per pixel)
auto r = pixels[3 * pixelPos ];
auto g = pixels[3 * pixelPos + 1];
auto b = pixels[3 * pixelPos + 2];
Y [deltaY][deltaX] = rgb2y (r, g, b) - 128; // again, the JPEG standard requires Y to be shifted by 128
// YCbCr444 is easy - the more complex YCbCr420 has to be computed about 20 lines below in a second pass
if (!downsample)
{
Cb[deltaY][deltaX] = rgb2cb(r, g, b); // standard RGB-to-YCbCr conversion
Cr[deltaY][deltaX] = rgb2cr(r, g, b);
}
}
}
// encode Y channel
int16_t quantizedY[8*8];
auto posNonZeroY = transformBlock(Y, scaledLuminance, quantizedY);
lastYDC = writeBlock(bitWriter, Y, quantizedY, lastYDC, huffmanLuminanceDC, huffmanLuminanceAC, codewords, posNonZeroY);
// Cb and Cr are encoded about 50 lines below
}
// grayscale images don't need any Cb and Cr information
if (!isRGB)
continue;
// ////////////////////////////////////////
// the following lines are only relevant for YCbCr420:
// average/downsample chrominance of four pixels while respecting the image borders
if (downsample)
for (short deltaY = 7; downsample && deltaY >= 0; deltaY--) // iterating loop in reverse increases cache read efficiency
{
auto row = minimum(mcuY + 2*deltaY, maxHeight); // each deltaX/Y step covers a 2x2 area
auto column = mcuX; // column is updated inside next loop
auto pixelPos = (row * int(width) + column) * 3; // numComponents = 3
// deltas (in bytes) to next row / column, must not exceed image borders
auto rowStep = (row < maxHeight) ? 3 * int(width) : 0; // always numComponents*width except for bottom line
auto columnStep = (column < maxWidth ) ? 3 : 0; // always numComponents except for rightmost pixel
for (short deltaX = 0; deltaX < 8; deltaX++)
{
// let's add all four samples (2x2 area)
auto right = pixelPos + columnStep;
auto down = pixelPos + rowStep;
auto downRight = pixelPos + columnStep + rowStep;
// note: cast from 8 bits to >8 bits to avoid overflows when adding
auto r = short(pixels[pixelPos ]) + pixels[right ] + pixels[down ] + pixels[downRight ];
auto g = short(pixels[pixelPos + 1]) + pixels[right + 1] + pixels[down + 1] + pixels[downRight + 1];
auto b = short(pixels[pixelPos + 2]) + pixels[right + 2] + pixels[down + 2] + pixels[downRight + 2];
// convert to Cb and Cr
Cb[deltaY][deltaX] = rgb2cb(r, g, b) / 4; // I still have to divide r,g,b by 4 to get their average values
Cr[deltaY][deltaX] = rgb2cr(r, g, b) / 4; // it's a bit faster if done AFTER CbCr conversion
// step forward to next 2x2 area
pixelPos += 2*3; // 2 pixels => 6 bytes (2*numComponents)
column += 2;
// reached right border ?
if (column >= maxWidth)
{
columnStep = 0;
pixelPos = ((row + 1) * int(width) - 1) * 3; // same as (row * width + maxWidth) * numComponents => current's row last pixel
}
}
} // end of YCbCr420 code for Cb and Cr
// encode Cb and Cr
int16_t quantizedCb[8*8], quantizedCr[8*8];
auto posNonZeroCb = transformBlock(Cb, scaledChrominance, quantizedCb);
auto posNonZeroCr = transformBlock(Cr, scaledChrominance, quantizedCr);
lastCbDC = writeBlock(bitWriter, Cb, quantizedCb, lastCbDC, huffmanChrominanceDC, huffmanChrominanceAC, codewords, posNonZeroCb);
lastCrDC = writeBlock(bitWriter, Cr, quantizedCr, lastCrDC, huffmanChrominanceDC, huffmanChrominanceAC, codewords, posNonZeroCr);
}
bitWriter.flush(); // now image is completely encoded, write any bits still left in the buffer
// ///////////////////////////
// EOI marker
bitWriter << 0xFF << 0xD9; // this marker has no length, therefore I can't use addMarker()
return true;
} // writeJpeg()
} // namespace TooJpeg
| 2484d60d06e7d520940f2c6cf3500f58e0700f31.cu | // //////////////////////////////////////////////////////////
// toojpeg.cpp
// written by Stephan Brumme, 2018-2019
// see https://create.stephan-brumme.com/toojpeg/
//
#include "toojpeg_modified.h"
// - the "official" specifications: https://www.w3.org/Graphics/JPEG/itu-t81.pdf and https://www.w3.org/Graphics/JPEG/jfif3.pdf
// - Wikipedia has a short description of the JFIF/JPEG file format: https://en.wikipedia.org/wiki/JPEG_File_Interchange_Format
// - the popular STB Image library includes Jon's JPEG encoder as well: https://github.com/nothings/stb/blob/master/stb_image_write.h
// - the most readable JPEG book (from a developer's perspective) is Miano's "Compressed Image File Formats" (1999, ISBN 0-201-60443-4),
// used copies are really cheap nowadays and include a CD with C++ sources as well (plus great format descriptions of GIF & PNG)
// - much more detailled is Mitchell/Pennebaker's "JPEG: Still Image Data Compression Standard" (1993, ISBN 0-442-01272-1)
// which contains the official JPEG standard, too - fun fact: I bought a signed copy in a second-hand store without noticing
namespace // anonymous namespace to hide local functions / constants / etc.
{
// ////////////////////////////////////////
// data types
using uint8_t = unsigned char;
using uint16_t = unsigned short;
using int16_t = short;
using int32_t = int; // at least four bytes
// ////////////////////////////////////////
// constants
// quantization tables from JPEG Standard, Annex K
const uint8_t DefaultQuantLuminance[8*8] =
{ 16, 11, 10, 16, 24, 40, 51, 61, // there are a few experts proposing slightly more efficient values,
12, 12, 14, 19, 26, 58, 60, 55, // e.g. https://www.imagemagick.org/discourse-server/viewtopic.php?t=20333
14, 13, 16, 24, 40, 57, 69, 56, // btw: Google's Guetzli project optimizes the quantization tables per image
14, 17, 22, 29, 51, 87, 80, 62,
18, 22, 37, 56, 68,109,103, 77,
24, 35, 55, 64, 81,104,113, 92,
49, 64, 78, 87,103,121,120,101,
72, 92, 95, 98,112,100,103, 99 };
const uint8_t DefaultQuantChrominance[8*8] =
{ 17, 18, 24, 47, 99, 99, 99, 99,
18, 21, 26, 66, 99, 99, 99, 99,
24, 26, 56, 99, 99, 99, 99, 99,
47, 66, 99, 99, 99, 99, 99, 99,
99, 99, 99, 99, 99, 99, 99, 99,
99, 99, 99, 99, 99, 99, 99, 99,
99, 99, 99, 99, 99, 99, 99, 99,
99, 99, 99, 99, 99, 99, 99, 99 };
const float dct_matrix[8][8] = {
{ 0.353553, 0.353553, 0.353553, 0.353553, 0.353553, 0.353553, 0.353553,0.353553},
{ 0.490393, 0.415735, 0.277785, 0.0975452, -0.0975452, -0.277785, -0.415735,-0.490393},
{ 0.46194, 0.191342, -0.191342, -0.46194, -0.46194, -0.191342, 0.191342,0.46194},
{ 0.415735, -0.0975452, -0.490393, -0.277785, 0.277785, 0.490393, 0.0975453,-0.415735},
{ 0.353553, -0.353553, -0.353553, 0.353553, 0.353553, -0.353553, -0.353553,0.353553},
{ 0.277785, -0.490393, 0.0975452, 0.415735, -0.415735, -0.0975451, 0.490393,-0.277785},
{ 0.191342, -0.46194, 0.46194, -0.191342, -0.191342, 0.46194, -0.46194,0.191342},
{ 0.0975452, -0.277785, 0.415735, -0.490393, 0.490393, -0.415735, 0.277785,-0.0975448}
};
const float dct_matrix_transpose[8][8] = {
{ 0.353553, 0.490393, 0.46194, 0.415735, 0.353553, 0.277785, 0.191342,0.0975452},
{ 0.353553, 0.415735, 0.191342, -0.0975452, -0.353553, -0.490393, -0.46194,-0.277785},
{ 0.353553, 0.277785, -0.191342, -0.490393, -0.353553, 0.0975452, 0.46194,0.415735},
{ 0.353553, 0.0975452, -0.46194, -0.277785, 0.353553, 0.415735, -0.191342,-0.490393},
{ 0.353553, -0.0975452, -0.46194, 0.277785, 0.353553, -0.415735, -0.191342,0.490393},
{ 0.353553, -0.277785, -0.191342, 0.490393, -0.353553, -0.0975451, 0.46194,-0.415735},
{ 0.353553, -0.415735, 0.191342, 0.0975453, -0.353553, 0.490393, -0.46194,0.277785},
{ 0.353553, -0.490393, 0.46194, -0.415735, 0.353553, -0.277785, 0.191342,-0.0975448}
};
const float dct_correction_matrix[8][8] = { // combine with the other scale matrix
{8.00000, 11.09631, 7.52311, 9.40692, 6.19024, 6.28556, 2.81439, 2.20719},
{11.09631, 1, 1, 1, 1, 1, 1, 1},
{9.05127, 1, 1, 1, 1, 1, 1, 1},
{9.40699, 1, 1, 1, 1, 1, 1, 1},
{4.14146, 1, 1, 1, 1, 1, 1, 1},
{6.28555, 1, 1, 1, 1, 1, 1, 1},
{3.48541, 1, 1, 1, 1, 1, 1, 1},
{2.20719, 1, 1, 1, 1, 1, 1, 1}
};
// 8x8 blocks are processed in zig-zag order
// most encoders use a zig-zag "forward" table, I switched to its inverse for performance reasons
// note: ZigZagInv[ZigZag[i]] = i
const uint8_t ZigZagInv[8*8] =
{ 0, 1, 8,16, 9, 2, 3,10, // ZigZag[] = 0, 1, 5, 6,14,15,27,28,
17,24,32,25,18,11, 4, 5, // 2, 4, 7,13,16,26,29,42,
12,19,26,33,40,48,41,34, // 3, 8,12,17,25,30,41,43,
27,20,13, 6, 7,14,21,28, // 9,11,18,24,31,40,44,53,
35,42,49,56,57,50,43,36, // 10,19,23,32,39,45,52,54,
29,22,15,23,30,37,44,51, // 20,22,33,38,46,51,55,60,
58,59,52,45,38,31,39,46, // 21,34,37,47,50,56,59,61,
53,60,61,54,47,55,62,63 }; // 35,36,48,49,57,58,62,63
// static Huffman code tables from JPEG standard Annex K
// - CodesPerBitsize tables define how many Huffman codes will have a certain bitsize (plus 1 because there nothing with zero bits),
// e.g. DcLuminanceCodesPerBitsize[2] = 5 because there are 5 Huffman codes being 2+1=3 bits long
// - Values tables are a list of values ordered by their Huffman code bitsize,
// e.g. AcLuminanceValues => Huffman(0x01,0x02 and 0x03) will have 2 bits, Huffman(0x00) will have 3 bits, Huffman(0x04,0x11 and 0x05) will have 4 bits, ...
// Huffman definitions for first DC/AC tables (luminance / Y channel)
const uint8_t DcLuminanceCodesPerBitsize[16] = { 0,1,5,1,1,1,1,1,1,0,0,0,0,0,0,0 }; // sum = 12
const uint8_t DcLuminanceValues [12] = { 0,1,2,3,4,5,6,7,8,9,10,11 }; // => 12 codes
const uint8_t AcLuminanceCodesPerBitsize[16] = { 0,2,1,3,3,2,4,3,5,5,4,4,0,0,1,125 }; // sum = 162
const uint8_t AcLuminanceValues [162] = // => 162 codes
{ 0x01,0x02,0x03,0x00,0x04,0x11,0x05,0x12,0x21,0x31,0x41,0x06,0x13,0x51,0x61,0x07,0x22,0x71,0x14,0x32,0x81,0x91,0xA1,0x08, // 16*10+2 symbols because
0x23,0x42,0xB1,0xC1,0x15,0x52,0xD1,0xF0,0x24,0x33,0x62,0x72,0x82,0x09,0x0A,0x16,0x17,0x18,0x19,0x1A,0x25,0x26,0x27,0x28, // upper 4 bits can be 0..F
0x29,0x2A,0x34,0x35,0x36,0x37,0x38,0x39,0x3A,0x43,0x44,0x45,0x46,0x47,0x48,0x49,0x4A,0x53,0x54,0x55,0x56,0x57,0x58,0x59, // while lower 4 bits can be 1..A
0x5A,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6A,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7A,0x83,0x84,0x85,0x86,0x87,0x88,0x89, // plus two special codes 0x00 and 0xF0
0x8A,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9A,0xA2,0xA3,0xA4,0xA5,0xA6,0xA7,0xA8,0xA9,0xAA,0xB2,0xB3,0xB4,0xB5,0xB6, // order of these symbols was determined empirically by JPEG committee
0xB7,0xB8,0xB9,0xBA,0xC2,0xC3,0xC4,0xC5,0xC6,0xC7,0xC8,0xC9,0xCA,0xD2,0xD3,0xD4,0xD5,0xD6,0xD7,0xD8,0xD9,0xDA,0xE1,0xE2,
0xE3,0xE4,0xE5,0xE6,0xE7,0xE8,0xE9,0xEA,0xF1,0xF2,0xF3,0xF4,0xF5,0xF6,0xF7,0xF8,0xF9,0xFA };
// Huffman definitions for second DC/AC tables (chrominance / Cb and Cr channels)
const uint8_t DcChrominanceCodesPerBitsize[16] = { 0,3,1,1,1,1,1,1,1,1,1,0,0,0,0,0 }; // sum = 12
const uint8_t DcChrominanceValues [12] = { 0,1,2,3,4,5,6,7,8,9,10,11 }; // => 12 codes (identical to DcLuminanceValues)
const uint8_t AcChrominanceCodesPerBitsize[16] = { 0,2,1,2,4,4,3,4,7,5,4,4,0,1,2,119 }; // sum = 162
const uint8_t AcChrominanceValues [162] = // => 162 codes
{ 0x00,0x01,0x02,0x03,0x11,0x04,0x05,0x21,0x31,0x06,0x12,0x41,0x51,0x07,0x61,0x71,0x13,0x22,0x32,0x81,0x08,0x14,0x42,0x91, // same number of symbol, just different order
0xA1,0xB1,0xC1,0x09,0x23,0x33,0x52,0xF0,0x15,0x62,0x72,0xD1,0x0A,0x16,0x24,0x34,0xE1,0x25,0xF1,0x17,0x18,0x19,0x1A,0x26, // (which is more efficient for AC coding)
0x27,0x28,0x29,0x2A,0x35,0x36,0x37,0x38,0x39,0x3A,0x43,0x44,0x45,0x46,0x47,0x48,0x49,0x4A,0x53,0x54,0x55,0x56,0x57,0x58,
0x59,0x5A,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6A,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7A,0x82,0x83,0x84,0x85,0x86,0x87,
0x88,0x89,0x8A,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9A,0xA2,0xA3,0xA4,0xA5,0xA6,0xA7,0xA8,0xA9,0xAA,0xB2,0xB3,0xB4,
0xB5,0xB6,0xB7,0xB8,0xB9,0xBA,0xC2,0xC3,0xC4,0xC5,0xC6,0xC7,0xC8,0xC9,0xCA,0xD2,0xD3,0xD4,0xD5,0xD6,0xD7,0xD8,0xD9,0xDA,
0xE2,0xE3,0xE4,0xE5,0xE6,0xE7,0xE8,0xE9,0xEA,0xF2,0xF3,0xF4,0xF5,0xF6,0xF7,0xF8,0xF9,0xFA };
const int16_t CodeWordLimit = 2048; // +/-2^11, maximum value after DCT
// ////////////////////////////////////////
// structs
// represent a single Huffman code
struct BitCode
{
BitCode() = default; // undefined state, must be initialized at a later time
BitCode(uint16_t code_, uint8_t numBits_)
: code(code_), numBits(numBits_) {}
uint16_t code; // JPEG's Huffman codes are limited to 16 bits
uint8_t numBits; // number of valid bits
};
// wrapper for bit output operations
struct BitWriter
{
// user-supplied callback that writes/stores one byte
TooJpeg::WRITE_ONE_BYTE output;
// initialize writer
explicit BitWriter(TooJpeg::WRITE_ONE_BYTE output_) : output(output_) {}
// store the most recently encoded bits that are not written yet
struct BitBuffer
{
int32_t data = 0; // actually only at most 24 bits are used
uint8_t numBits = 0; // number of valid bits (the right-most bits)
} buffer;
// write Huffman bits stored in BitCode, keep excess bits in BitBuffer
BitWriter& operator<<(const BitCode& data)
{
// append the new bits to those bits leftover from previous call(s)
buffer.numBits += data.numBits;
buffer.data <<= data.numBits;
buffer.data |= data.code;
// write all "full" bytes
while (buffer.numBits >= 8)
{
// extract highest 8 bits
buffer.numBits -= 8;
auto oneByte = uint8_t(buffer.data >> buffer.numBits);
output(oneByte);
if (oneByte == 0xFF) // 0xFF has a special meaning for JPEGs (it's a block marker)
output(0); // therefore pad a zero to indicate "nope, this one ain't a marker, it's just a coincidence"
// note: I don't clear those written bits, therefore buffer.bits may contain garbage in the high bits
// if you really want to "clean up" (e.g. for debugging purposes) then uncomment the following line
//buffer.bits &= (1 << buffer.numBits) - 1;
}
return *this;
}
// write all non-yet-written bits, fill gaps with 1s (that's a strange JPEG thing)
void flush()
{
// at most seven set bits needed to "fill" the last byte: 0x7F = binary 0111 1111
*this << BitCode(0x7F, 7); // I should set buffer.numBits = 0 but since there are no single bits written after flush() I can safely ignore it
}
// NOTE: all the following BitWriter functions IGNORE the BitBuffer and write straight to output !
// write a single byte
BitWriter& operator<<(uint8_t oneByte)
{
output(oneByte);
return *this;
}
// write an array of bytes
template <typename T, int Size>
BitWriter& operator<<(T (&manyBytes)[Size])
{
for (auto c : manyBytes)
output(c);
return *this;
}
// start a new JFIF block
void addMarker(uint8_t id, uint16_t length)
{
output(0xFF); output(id); // ID, always preceded by 0xFF
output(uint8_t(length >> 8)); // length of the block (big-endian, includes the 2 length bytes as well)
output(uint8_t(length & 0xFF));
}
};
// ////////////////////////////////////////
// functions / templates
// same as std::min()
template <typename Number>
Number minimum(Number value, Number maximum)
{
return value <= maximum ? value : maximum;
}
// restrict a value to the interval [minimum, maximum]
template <typename Number, typename Limit>
Number clamp(Number value, Limit minValue, Limit maxValue)
{
if (value <= minValue) return minValue; // never smaller than the minimum
if (value >= maxValue) return maxValue; // never bigger than the maximum
return value; // value was inside interval, keep it
}
// convert from RGB to YCbCr, constants are similar to ITU-R, see https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion
float rgb2y (float r, float g, float b) { return +0.299f * r +0.587f * g +0.114f * b; }
float rgb2cb(float r, float g, float b) { return -0.16874f * r -0.33126f * g +0.5f * b; }
float rgb2cr(float r, float g, float b) { return +0.5f * r -0.41869f * g -0.08131f * b; }
/*
placeholder for GPU accelerated version
*/
  int transformBlock(float block[8][8], const float scaled[8*8], int16_t quantized[8*8]) // returns the position of the last non-zero coefficient
{
/*
STEP 1: DCT
    Parallelizability: strong (matmul)
Status: done, needs implementing
*/
/* gpu accelerated 8x8 DCT function */
DCT_8x8(block);
// "linearize" the 8x8 block, treat it as a flat array of 64 floats
auto block64 = (float*) block;
/*
Step 2: Scale
    Parallelizability: Strong (for loop)
Status: not done
*/
for (auto i = 0; i < 8*8; i++)
block64[i] *= scaled[i];
// quantize and zigzag the other 63 coefficients
auto posNonZero = 0; // find last coefficient which is not zero (because trailing zeros are encoded differently)
/*
Step 3: Quantization
    Parallelizability: Strong (double for loop)
Status: not done
*/
for (auto i = 0; i < 8*8; i++)
{
auto value = block64[ZigZagInv[i]];
// round to nearest integer
quantized[i] = int(value + (value >= 0 ? +0.5f : -0.5f)); // C++11's nearbyint() achieves a similar effect
// remember offset of last non-zero coefficient
if (quantized[i] != 0)
posNonZero = i;
}
    return posNonZero;
  }
/*
  data is an array of n 8*8 blocks
  scale is an array of 8*8
  posNonZero will store the position of the last non-zero value for each of the n blocks
n is the number of blocks
*/
void transformBlock_many(float* const data, const float* const scale, float* const posNonZero, const uint32_t n)
{
// DCT
// Scale (remove scale step from DCT and combine the scale matrix there with the one here so it's only 1 step instead of 2)
    // quantize (process many blocks at a time, with parallelism inside each block too)
    // find pos non zero (parallel across blocks, but serial inside each block)
// start counting from back and stop at first non-zero value, can skip most of the block then
}
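  // The kernel below is only a hedged sketch of the batched plan in the comments above, not code from the
  // original project: the kernel name, the int16_t/int output layout and the zigZagInvDev parameter
  // (assumed to be a device copy of ZigZagInv) are all assumptions. One CUDA block handles one 8x8 tile:
  // 64 threads fold the scale step into quantization, and the "find last non-zero" scan becomes a
  // shared-memory atomicMax instead of a serial backwards walk.
  __global__ void quantizeBlocksSketch(const float* blocks, const float* scale, const uint8_t* zigZagInvDev,
                                       int16_t* quantized, int* posNonZero)
  {
    __shared__ int lastNonZero;
    const int block = blockIdx.x;   // one 8x8 block per CUDA block
    const int i     = threadIdx.x;  // 0..63, one coefficient per thread
    if (i == 0)
      lastNonZero = 0;
    __syncthreads();
    const int   src   = zigZagInvDev[i];                      // zigzag reorder while quantizing
    const float value = blocks[block * 64 + src] * scale[src]; // scale step folded in
    auto rounded = int16_t(value + (value >= 0 ? +0.5f : -0.5f));
    quantized[block * 64 + i] = rounded;
    if (rounded != 0)
      atomicMax(&lastNonZero, i);
    __syncthreads();
    if (i == 0)
      posNonZero[block] = lastNonZero;
  }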
/*
data is (width * height) * 3
data[i] = r, data[i + 1] = g, data[i + 2] = b
Y is stored as (width/8 * height/8) * (8x8 Y block)
etc for Cb, Cr
*/
  void convertRGBtoYCbCr444(uint8_t* data, const int width, const int height, float* Y, float* Cb, float* Cr)
{
// Y = rgb2Y(data)
// Y = Y - 128.f, probably in the same kernel so we dont need a deviceSynchronize
// Cb = rgb2Cb(data)
// Cr = rgb2Cr(data)
// cudaDeviceSynchronize
}
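  // Hedged sketch only, not the original implementation: one plausible shape for the kernel described in the
  // comments above. The kernel name and the flat row-major Y/Cb/Cr layout are assumptions (the comment above
  // describes an 8x8-block layout), and the rgb2y/rgb2cb/rgb2cr constants are inlined because those helpers
  // are host-only here; a real version might mark them __host__ __device__ instead. One thread per pixel.
  __global__ void rgbToYCbCr444Sketch(const uint8_t* rgb, int width, int height,
                                      float* Y, float* Cb, float* Cr)
  {
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width || y >= height)
      return;
    const int   p = y * width + x;
    const float r = rgb[3 * p], g = rgb[3 * p + 1], b = rgb[3 * p + 2];
    Y [p] = +0.299f   * r + 0.587f   * g + 0.114f   * b - 128.f; // same constants as rgb2y, already shifted by 128
    Cb[p] = -0.16874f * r - 0.33126f * g + 0.5f     * b;
    Cr[p] = +0.5f     * r - 0.41869f * g - 0.08131f * b;
  }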
/*
Y is stored as (width/8 * height/8) * (8x8 Y block)
Cb/Cr is stored as (width/16 * height/16) * (1 Cb 8x8 block / 1 Cr 8x8 block)
*/
void convertRGBtoYCbCr420(uint8_t* data, const int width, const int height, float* Y, float* Cb, float* Cr)
{
// Y = rgb2Y(data)
// Y = Y - 128.f, probably in the same kernel so we dont need a deviceSynchronize
// downscale RGB to 1/4 size with averages
// wait for the downscale kernel to finish
// Cb = rgb2Cb(data)
// Cr = rgb2Cr(data)
// cudaDeviceSynchronize
}
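  // Hedged sketch of just the 2x2 averaging step mentioned above (the kernel name, the quarter-size float
  // buffer and the assumption of even width/height are mine; border replication for odd sizes is omitted).
  // Each thread averages one 2x2 RGB cell so the Cb/Cr conversion can afterwards run on the smaller buffer.
  __global__ void downsampleRgb2x2Sketch(const uint8_t* rgb, int width, int height, float* rgbSmall)
  {
    const int x = blockIdx.x * blockDim.x + threadIdx.x; // coordinates in the half-size image
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width / 2 || y >= height / 2)
      return;
    for (int c = 0; c < 3; c++) // r, g, b
    {
      const int p = (2 * y * width + 2 * x) * 3 + c;
      rgbSmall[(y * (width / 2) + x) * 3 + c] =
        (rgb[p] + rgb[p + 3] + rgb[p + 3 * width] + rgb[p + 3 * width + 3]) * 0.25f;
    }
  }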
/*
data is n * (width * height),
Y is returned in data
*/
  void convertBWtoY(uint8_t* data, const int width, const int height)
{
// Y = pixel - 128.f but in CUDA
}
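  // Hedged sketch for the grayscale path described above (the kernel name and the separate float output are
  // assumptions): every thread shifts one pixel by 128, which is all the comment above asks for.
  __global__ void bwToYSketch(const uint8_t* pixels, float* Y, int n)
  {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
      Y[i] = pixels[i] - 128.f;
  }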
/*
writes and huffman encodes the block
*/
  int16_t writeBlock(BitWriter& writer, const int16_t quantized[8*8], int16_t lastDC,
                     const BitCode huffmanDC[256], const BitCode huffmanAC[256], const BitCode* codewords, int posNonZero)
  {
/*
      Step 5: Begin Huffman encoding
      Parallelizability: none, each block depends on the previous one
Status: not done
*/
// same "average color" as previous block ?
    auto DC = quantized[0]; // the DC coefficient was already rounded during quantization
auto diff = DC - lastDC;
if (diff == 0)
writer << huffmanDC[0x00]; // yes, write a special short symbol
else
{
auto bits = codewords[diff]; // nope, encode the difference to previous block's average color
writer << huffmanDC[bits.numBits] << bits;
}
/*
Step 6: Write the huffman encoded bits
      Parallelizability: none (file I/O, bytes must be written in order)
Status: not done
*/
// encode ACs (quantized[1..63])
auto offset = 0; // upper 4 bits count the number of consecutive zeros
for (auto i = 1; i <= posNonZero; i++) // quantized[0] was already written, skip all trailing zeros, too
{
// zeros are encoded in a special way
while (quantized[i] == 0) // found another zero ?
{
offset += 0x10; // add 1 to the upper 4 bits
// split into blocks of at most 16 consecutive zeros
if (offset > 0xF0) // remember, the counter is in the upper 4 bits, 0xF = 15
{
writer << huffmanAC[0xF0]; // 0xF0 is a special code for "16 zeros"
offset = 0;
}
i++;
}
auto encoded = codewords[quantized[i]];
// combine number of zeros with the number of bits of the next non-zero value
writer << huffmanAC[offset + encoded.numBits] << encoded; // and the value itself
offset = 0;
}
// send end-of-block code (0x00), only needed if there are trailing zeros
if (posNonZero < 8*8 - 1) // = 63
writer << huffmanAC[0x00];
return DC;
}
void writeBlock_many(BitWriter& writer, float* const data, const uint32_t n, const BitCode huffmanDC[256], const BitCode huffmanAC[256], const BitCode* codewords)
{
// for block in data
// compare to DC of last block
// encode non-zeros in block
// encode zeros in block
}
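  // Hedged sketch of the host-side driver described above, not original code: Huffman bits must be emitted
  // strictly in order, so the loop stays serial and simply reuses writeBlock per 8x8 block. It assumes the
  // quantizedBlocks / posNonZero arrays were produced by a batched GPU pass beforehand and that writeBlock
  // takes the quantized coefficients as adjusted above.
  int16_t writeBlocksSketch(BitWriter& writer, const int16_t* quantizedBlocks, const int* posNonZero,
                            const uint32_t n, int16_t lastDC,
                            const BitCode huffmanDC[256], const BitCode huffmanAC[256], const BitCode* codewords)
  {
    for (uint32_t block = 0; block < n; block++)
      lastDC = writeBlock(writer, &quantizedBlocks[block * 64], lastDC, huffmanDC, huffmanAC, codewords, posNonZero[block]);
    return lastDC;
  }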
// Jon's code includes the pre-generated Huffman codes
// I don't like these "magic constants" and compute them on my own :-)
void generateHuffmanTable(const uint8_t numCodes[16], const uint8_t* values, BitCode result[256])
{
// process all bitsizes 1 thru 16, no JPEG Huffman code is allowed to exceed 16 bits
auto huffmanCode = 0;
for (auto numBits = 1; numBits <= 16; numBits++)
{
// ... and each code of these bitsizes
for (auto i = 0; i < numCodes[numBits - 1]; i++) // note: numCodes array starts at zero, but smallest bitsize is 1
result[*values++] = BitCode(huffmanCode++, numBits);
// next Huffman code needs to be one bit wider
huffmanCode <<= 1;
}
}
} // end of anonymous namespace
// -------------------- externally visible code --------------------
namespace TooJpeg
{
// the only exported function ...
bool writeJpeg(WRITE_ONE_BYTE output, const void* pixels_, unsigned short width, unsigned short height,
bool isRGB, unsigned char quality_, bool downsample, const char* comment)
{
// reject invalid pointers
if (output == nullptr || pixels_ == nullptr)
return false;
// check image format
if (width == 0 || height == 0)
return false;
// number of components
const auto numComponents = isRGB ? 3 : 1;
// note: if there is just one component (=grayscale), then only luminance needs to be stored in the file
// thus everything related to chrominance need not to be written to the JPEG
// I still compute a few things, like quantization tables to avoid a complete code mess
// grayscale images can't be downsampled (because there are no Cb + Cr channels)
if (!isRGB)
downsample = false;
// wrapper for all output operations
BitWriter bitWriter(output);
// ////////////////////////////////////////
// JFIF headers
const uint8_t HeaderJfif[2+2+16] =
{ 0xFF,0xD8, // SOI marker (start of image)
0xFF,0xE0, // JFIF APP0 tag
0,16, // length: 16 bytes (14 bytes payload + 2 bytes for this length field)
'J','F','I','F',0, // JFIF identifier, zero-terminated
1,1, // JFIF version 1.1
0, // no density units specified
0,1,0,1, // density: 1 pixel "per pixel" horizontally and vertically
0,0 }; // no thumbnail (size 0 x 0)
bitWriter << HeaderJfif;
// ////////////////////////////////////////
// comment (optional)
if (comment != nullptr)
{
// look for zero terminator
auto length = 0; // = strlen(comment);
while (comment[length] != 0)
length++;
// write COM marker
bitWriter.addMarker(0xFE, 2+length); // block size is number of bytes (without zero terminator) + 2 bytes for this length field
// ... and write the comment itself
for (auto i = 0; i < length; i++)
bitWriter << comment[i];
}
// ////////////////////////////////////////
// adjust quantization tables to desired quality
// quality level must be in 1 ... 100
auto quality = clamp<uint16_t>(quality_, 1, 100);
// convert to an internal JPEG quality factor, formula taken from libjpeg
quality = quality < 50 ? 5000 / quality : 200 - quality * 2;
  /* Probably not worth parallelizing this step since it's only 64 iterations */
uint8_t quantLuminance [8*8];
uint8_t quantChrominance[8*8];
for (auto i = 0; i < 8*8; i++)
{
int luminance = (DefaultQuantLuminance [ZigZagInv[i]] * quality + 50) / 100;
int chrominance = (DefaultQuantChrominance[ZigZagInv[i]] * quality + 50) / 100;
// clamp to 1..255
quantLuminance [i] = clamp(luminance, 1, 255);
quantChrominance[i] = clamp(chrominance, 1, 255);
}
// write quantization tables
bitWriter.addMarker(0xDB, 2 + (isRGB ? 2 : 1) * (1 + 8*8)); // length: 65 bytes per table + 2 bytes for this length field
// each table has 64 entries and is preceded by an ID byte
bitWriter << 0x00 << quantLuminance; // first quantization table
if (isRGB)
bitWriter << 0x01 << quantChrominance; // second quantization table, only relevant for color images
// ////////////////////////////////////////
// write image infos (SOF0 - start of frame)
bitWriter.addMarker(0xC0, 2+6+3*numComponents); // length: 6 bytes general info + 3 per channel + 2 bytes for this length field
// 8 bits per channel
bitWriter << 0x08
// image dimensions (big-endian)
<< (height >> 8) << (height & 0xFF)
<< (width >> 8) << (width & 0xFF);
// sampling and quantization tables for each component
bitWriter << numComponents; // 1 component (grayscale, Y only) or 3 components (Y,Cb,Cr)
for (auto id = 1; id <= numComponents; id++)
bitWriter << id // component ID (Y=1, Cb=2, Cr=3)
// bitmasks for sampling: highest 4 bits: horizontal, lowest 4 bits: vertical
<< (id == 1 && downsample ? 0x22 : 0x11) // 0x11 is default YCbCr 4:4:4 and 0x22 stands for YCbCr 4:2:0
<< (id == 1 ? 0 : 1); // use quantization table 0 for Y, table 1 for Cb and Cr
// ////////////////////////////////////////
// Huffman tables
// DHT marker - define Huffman tables
bitWriter.addMarker(0xC4, isRGB ? (2+208+208) : (2+208));
// 2 bytes for the length field, store chrominance only if needed
// 1+16+12 for the DC luminance
// 1+16+162 for the AC luminance (208 = 1+16+12 + 1+16+162)
// 1+16+12 for the DC chrominance
// 1+16+162 for the AC chrominance (208 = 1+16+12 + 1+16+162, same as above)
// store luminance's DC+AC Huffman table definitions
bitWriter << 0x00 // highest 4 bits: 0 => DC, lowest 4 bits: 0 => Y (baseline)
<< DcLuminanceCodesPerBitsize
<< DcLuminanceValues;
bitWriter << 0x10 // highest 4 bits: 1 => AC, lowest 4 bits: 0 => Y (baseline)
<< AcLuminanceCodesPerBitsize
<< AcLuminanceValues;
// compute actual Huffman code tables (see Jon's code for precalculated tables)
BitCode huffmanLuminanceDC[256];
BitCode huffmanLuminanceAC[256];
generateHuffmanTable(DcLuminanceCodesPerBitsize, DcLuminanceValues, huffmanLuminanceDC);
generateHuffmanTable(AcLuminanceCodesPerBitsize, AcLuminanceValues, huffmanLuminanceAC);
// chrominance is only relevant for color images
BitCode huffmanChrominanceDC[256];
BitCode huffmanChrominanceAC[256];
if (isRGB)
{
// store luminance's DC+AC Huffman table definitions
bitWriter << 0x01 // highest 4 bits: 0 => DC, lowest 4 bits: 1 => Cr,Cb (baseline)
<< DcChrominanceCodesPerBitsize
<< DcChrominanceValues;
bitWriter << 0x11 // highest 4 bits: 1 => AC, lowest 4 bits: 1 => Cr,Cb (baseline)
<< AcChrominanceCodesPerBitsize
<< AcChrominanceValues;
// compute actual Huffman code tables (see Jon's code for precalculated tables)
generateHuffmanTable(DcChrominanceCodesPerBitsize, DcChrominanceValues, huffmanChrominanceDC);
generateHuffmanTable(AcChrominanceCodesPerBitsize, AcChrominanceValues, huffmanChrominanceAC);
}
// ////////////////////////////////////////
// start of scan (there is only a single scan for baseline JPEGs)
bitWriter.addMarker(0xDA, 2+1+2*numComponents+3); // 2 bytes for the length field, 1 byte for number of components,
// then 2 bytes for each component and 3 bytes for spectral selection
// assign Huffman tables to each component
bitWriter << numComponents;
for (auto id = 1; id <= numComponents; id++)
// highest 4 bits: DC Huffman table, lowest 4 bits: AC Huffman table
bitWriter << id << (id == 1 ? 0x00 : 0x11); // Y: tables 0 for DC and AC; Cb + Cr: tables 1 for DC and AC
// constant values for our baseline JPEGs (which have a single sequential scan)
static const uint8_t Spectral[3] = { 0, 63, 0 }; // spectral selection: must be from 0 to 63; successive approximation must be 0
bitWriter << Spectral;
// ////////////////////////////////////////
// adjust quantization tables with AAN scaling factors to simplify DCT
float scaledLuminance [8*8];
float scaledChrominance[8*8];
for (auto i = 0; i < 8*8; i++)
{
auto row = ZigZagInv[i] / 8; // same as ZigZagInv[i] >> 3
auto column = ZigZagInv[i] % 8; // same as ZigZagInv[i] & 7
// scaling constants for AAN DCT algorithm: AanScaleFactors[0] = 1, AanScaleFactors[k=1..7] = cos(k*PI/16) * sqrt(2)
static const float AanScaleFactors[8] = { 1, 1.387039845f, 1.306562965f, 1.175875602f, 1, 0.785694958f, 0.541196100f, 0.275899379f };
auto factor = 1 / (AanScaleFactors[row] * AanScaleFactors[column] * 8);
scaledLuminance [ZigZagInv[i]] = factor / quantLuminance [i];
scaledChrominance[ZigZagInv[i]] = factor / quantChrominance[i];
// if you really want JPEGs that are bitwise identical to Jon Olick's code then you need slightly different formulas (note: sqrt(8) = 2.828427125f)
//static const float aasf[] = { 1.0f * 2.828427125f, 1.387039845f * 2.828427125f, 1.306562965f * 2.828427125f, 1.175875602f * 2.828427125f, 1.0f * 2.828427125f, 0.785694958f * 2.828427125f, 0.541196100f * 2.828427125f, 0.275899379f * 2.828427125f }; // line 240 of jo_jpeg.cpp
//scaledLuminance [ZigZagInv[i]] = 1 / (quantLuminance [i] * aasf[row] * aasf[column]); // lines 266-267 of jo_jpeg.cpp
//scaledChrominance[ZigZagInv[i]] = 1 / (quantChrominance[i] * aasf[row] * aasf[column]);
}
// ////////////////////////////////////////
// precompute JPEG codewords for quantized DCT
BitCode codewordsArray[2 * CodeWordLimit]; // note: quantized[i] is found at codewordsArray[quantized[i] + CodeWordLimit]
BitCode* codewords = &codewordsArray[CodeWordLimit]; // allow negative indices, so quantized[i] is at codewords[quantized[i]]
uint8_t numBits = 1; // each codeword has at least one bit (value == 0 is undefined)
int32_t mask = 1; // mask is always 2^numBits - 1, initial value 2^1-1 = 2-1 = 1
for (int16_t value = 1; value < CodeWordLimit; value++)
{
// numBits = position of highest set bit (ignoring the sign)
// mask = (2^numBits) - 1
if (value > mask) // one more bit ?
{
numBits++;
mask = (mask << 1) | 1; // append a set bit
}
codewords[-value] = BitCode(mask - value, numBits); // note that I use a negative index => codewords[-value] = codewordsArray[CodeWordLimit value]
codewords[+value] = BitCode( value, numBits);
}
// just convert image data from void*
auto pixels = (const uint8_t*)pixels_;
// the next two variables are frequently used when checking for image borders
const auto maxWidth = width - 1; // "last row"
const auto maxHeight = height - 1; // "bottom line"
// process MCUs (minimum codes units) => image is subdivided into a grid of 8x8 or 16x16 tiles
const auto sampling = downsample ? 2 : 1; // 1x1 or 2x2 sampling
const auto mcuSize = 8 * sampling;
/*
  steps taken in the loop:
    Step 1: convert rgb into YCbCr
      Parallelizability: strong (loops)
    Step 2: Encode Y
      Parallelizability: medium
        if we break it up into DCT, scaling, then writing,
        we can DCT and scale all the Y blocks on the GPU and then
        finish the writing on the CPU
    Step 3: Perform downsampling (if applicable)
      Parallelizability: strong (loops)
      can move up into the first step of converting rgb to YCbCr
    Step 4: Encode Cb and Cr
      Parallelizability: medium (see step 3)
*/
// average color of the previous MCU
int16_t lastYDC = 0, lastCbDC = 0, lastCrDC = 0;
// convert from RGB to YCbCr
float Y[8][8], Cb[8][8], Cr[8][8];
for (auto mcuY = 0; mcuY < height; mcuY += mcuSize) // each step is either 8 or 16 (=mcuSize)
for (auto mcuX = 0; mcuX < width; mcuX += mcuSize)
{
// YCbCr 4:4:4 format: each MCU is a 8x8 block - the same applies to grayscale images, too
// YCbCr 4:2:0 format: each MCU represents a 16x16 block, stored as 4x 8x8 Y-blocks plus 1x 8x8 Cb and 1x 8x8 Cr block)
for (auto blockY = 0; blockY < mcuSize; blockY += 8) // iterate once (YCbCr444 and grayscale) or twice (YCbCr420)
for (auto blockX = 0; blockX < mcuSize; blockX += 8)
{
// now we finally have an 8x8 block ...
for (auto deltaY = 0; deltaY < 8; deltaY++)
{
auto column = minimum(mcuX + blockX , maxWidth); // must not exceed image borders, replicate last row/column if needed
auto row = minimum(mcuY + blockY + deltaY, maxHeight);
for (auto deltaX = 0; deltaX < 8; deltaX++)
{
// find actual pixel position within the current image
auto pixelPos = row * int(width) + column; // the cast ensures that we don't run into multiplication overflows
if (column < maxWidth)
column++;
// grayscale images have solely a Y channel which can be easily derived from the input pixel by shifting it by 128
if (!isRGB)
{
Y[deltaY][deltaX] = pixels[pixelPos] - 128.f;
continue;
}
// RGB: 3 bytes per pixel (whereas grayscale images have only 1 byte per pixel)
auto r = pixels[3 * pixelPos ];
auto g = pixels[3 * pixelPos + 1];
auto b = pixels[3 * pixelPos + 2];
Y [deltaY][deltaX] = rgb2y (r, g, b) - 128; // again, the JPEG standard requires Y to be shifted by 128
// YCbCr444 is easy - the more complex YCbCr420 has to be computed about 20 lines below in a second pass
if (!downsample)
{
Cb[deltaY][deltaX] = rgb2cb(r, g, b); // standard RGB-to-YCbCr conversion
Cr[deltaY][deltaX] = rgb2cr(r, g, b);
}
}
}
          // encode Y channel: DCT+quantization first, then Huffman-encode the quantized coefficients
          int16_t quantizedY[8*8];
          auto posNonZeroY = transformBlock(Y, scaledLuminance, quantizedY);
          lastYDC = writeBlock(bitWriter, quantizedY, lastYDC, huffmanLuminanceDC, huffmanLuminanceAC, codewords, posNonZeroY);
// Cb and Cr are encoded about 50 lines below
}
// grayscale images don't need any Cb and Cr information
if (!isRGB)
continue;
// ////////////////////////////////////////
// the following lines are only relevant for YCbCr420:
// average/downsample chrominance of four pixels while respecting the image borders
if (downsample)
for (short deltaY = 7; downsample && deltaY >= 0; deltaY--) // iterating loop in reverse increases cache read efficiency
{
auto row = minimum(mcuY + 2*deltaY, maxHeight); // each deltaX/Y step covers a 2x2 area
auto column = mcuX; // column is updated inside next loop
auto pixelPos = (row * int(width) + column) * 3; // numComponents = 3
// deltas (in bytes) to next row / column, must not exceed image borders
auto rowStep = (row < maxHeight) ? 3 * int(width) : 0; // always numComponents*width except for bottom line
auto columnStep = (column < maxWidth ) ? 3 : 0; // always numComponents except for rightmost pixel
for (short deltaX = 0; deltaX < 8; deltaX++)
{
// let's add all four samples (2x2 area)
auto right = pixelPos + columnStep;
auto down = pixelPos + rowStep;
auto downRight = pixelPos + columnStep + rowStep;
// note: cast from 8 bits to >8 bits to avoid overflows when adding
auto r = short(pixels[pixelPos ]) + pixels[right ] + pixels[down ] + pixels[downRight ];
auto g = short(pixels[pixelPos + 1]) + pixels[right + 1] + pixels[down + 1] + pixels[downRight + 1];
auto b = short(pixels[pixelPos + 2]) + pixels[right + 2] + pixels[down + 2] + pixels[downRight + 2];
// convert to Cb and Cr
Cb[deltaY][deltaX] = rgb2cb(r, g, b) / 4; // I still have to divide r,g,b by 4 to get their average values
Cr[deltaY][deltaX] = rgb2cr(r, g, b) / 4; // it's a bit faster if done AFTER CbCr conversion
// step forward to next 2x2 area
pixelPos += 2*3; // 2 pixels => 6 bytes (2*numComponents)
column += 2;
// reached right border ?
if (column >= maxWidth)
{
columnStep = 0;
            pixelPos = ((row + 1) * int(width) - 1) * 3; // same as (row * width + maxWidth) * numComponents => current row's last pixel
}
}
} // end of YCbCr420 code for Cb and Cr
      // encode Cb and Cr the same way: transform+quantize each block, then write it
      int16_t quantizedC[8*8];
      auto posNonZeroC = transformBlock(Cb, scaledChrominance, quantizedC);
      lastCbDC = writeBlock(bitWriter, quantizedC, lastCbDC, huffmanChrominanceDC, huffmanChrominanceAC, codewords, posNonZeroC);
      posNonZeroC = transformBlock(Cr, scaledChrominance, quantizedC);
      lastCrDC = writeBlock(bitWriter, quantizedC, lastCrDC, huffmanChrominanceDC, huffmanChrominanceAC, codewords, posNonZeroC);
}
bitWriter.flush(); // now image is completely encoded, write any bits still left in the buffer
// ///////////////////////////
// EOI marker
bitWriter << 0xFF << 0xD9; // this marker has no length, therefore I can't use addMarker()
return true;
} // writeJpeg()
} // namespace TooJpeg
|
3e141b4a8207ffca2ff21f1385d64166cce3f06a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
#include <math.h>
#include <iostream>
#include <fstream>
#include <vector>
#include <chrono>
#include <algorithm>
#include <map>
using namespace std;
//Estructura de nodo
struct Node {
short x;
short y;
short cost;
float heuristic;
float score;
Node() {}
Node(short x_, short y_, float heuristic_, short cost_) {
x = x_;
y = y_;
heuristic = heuristic_;
cost = cost_;
score = heuristic_ + cost_;
}
void setNode(short x_, short y_, float heuristic_, short cost_) {
x = x_;
y = y_;
heuristic = heuristic_;
cost = cost_;
score = heuristic_ + cost_;
}
	// Le pasamos un nodo y lo copia a sí
void copyNode(Node toCopy) {
x = toCopy.x;
y = toCopy.y;
heuristic = toCopy.heuristic;
cost = toCopy.cost;
score = toCopy.score;
}
void printMe() {
cout << endl << "x: " << x << endl;
cout << "y: " << y << endl;
cout << "heuristic: " << heuristic << endl;
cout << "cost: " << cost << endl;
cout << "score: " << score << endl;
}
void updateCost(short cost_) {
cost = cost_;
score = heuristic + cost_;
}
};
//Estructura de matriz (maze)
struct Matrix {
short rows;
short cols;
vector<short> data;
Matrix(short rows_, short cols_):rows(rows_), cols(cols_), data(rows*cols) { }
short & operator()(size_t row, size_t col) {
return data[row*cols+col];
}
short operator()(size_t row, size_t col) const {
return data[row*cols+col];
}
};
// Funcion para calcular heuristica de la matriz
__global__ void calcHeuristicOnGPU(float *heuristicMat, short rows, short cols, short finalX, short finalY) {
//Codigo de clase
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y;
	if (ix < cols && iy < rows) {
		// cast first: ix and iy are unsigned, so (finalX - ix) would wrap around for negative differences
		heuristicMat[iy*cols+ix] = sqrt(pow((float)finalX - (float)ix, 2) + pow((float)finalY - (float)iy, 2));
	}
}
void debugHeuristicMat(float *heuristicMatHost, short rows, short cols) {
for(int i = 0; i < rows; i++ ) {
for(int j = 0; j < cols; j++ ) {
// cout << "x: " << j << " y: " << i << endl;
cout << "x: " << j << " y: " << i << " heuristic: " << heuristicMatHost[j+i*cols] << endl;
/* cout << "x: " << j << " y: " << i << "heuristic: " << heuristicMatHost[j] << endl; */
}
}
}
//Funcion para hacer el sort
bool sortQueue (Node a, Node b) { return (a.score < b.score); }
string findPath(Node currentNode, map<string, string> &cameFrom) {
string key = to_string(currentNode.x) + "-" + to_string(currentNode.y);
string value = cameFrom[key];
string path = key + "\n";
while (value != "START") {
path += value + "\n";
key = value;
value = cameFrom[key];
}
return path;
}
short isInSet(Node node, vector<Node> &set) {
for (int i = 0; i < set.size(); i++) {
if (node.x == set[i].x && node.y == set[i].y) return i;
}
return -1;
}
void expandNode(Node currentNode, vector<Node> &openSet, vector<Node> &closedSet, map<string, string> &cameFrom, Matrix maze, float *heuristicMatHost) {
vector<Node> tempNodes;
short x = currentNode.x;
short y = currentNode.y;
short cost = currentNode.cost + 1;
// Left
short _x = x - 1;
short _y = y;
if (maze(_y, _x) != 1) tempNodes.push_back(*new Node(_x, _y, heuristicMatHost[_x+_y*maze.cols], cost));
// Right
_x = x + 1;
if (maze(_y, _x) != 1) tempNodes.push_back(*new Node(_x, _y, heuristicMatHost[_x+_y*maze.cols], cost));
// Up
_x = x;
_y = y - 1;
if (maze(_y, _x) != 1) tempNodes.push_back(*new Node(_x, _y, heuristicMatHost[_x+_y*maze.cols], cost));
// Down
_y = y + 1;
if (maze(_y, _x) != 1) tempNodes.push_back(*new Node(_x, _y, heuristicMatHost[_x+_y*maze.cols], cost));
// Checamos cada vecino
for (int i = 0; i < tempNodes.size(); i++) {
		// Si está en el closed set, no hacemos nada con el nodo
if (isInSet(tempNodes[i], closedSet) > -1) continue;
short index = isInSet(tempNodes[i], openSet);
		if (index == -1) { // Si no está en openSet lo metemos a openSet
openSet.push_back(tempNodes[i]);
		} else { // si sí está en openSet, checamos si llegamos con mejor score y lo actualizamos
if (tempNodes[i].score >= currentNode.score) continue;
openSet[index].updateCost(tempNodes[i].cost);
}
string key = to_string(tempNodes[i].x) + "-" + to_string(tempNodes[i].y);
string value = to_string(x) + "-" + to_string(y);
cameFrom[key] = value;
}
}
void aStarSearch(Matrix maze, short initialX, short initialY, short finalX, short finalY, float *heuristicMatHost) {
vector<Node> closedSet; // Set of nodes already evaluated
//Creamos el nodo inicial
float heuristic = sqrt(pow(finalX - initialX, 2) + pow(finalY - initialY, 2));
Node initialNode(initialX, initialY, heuristic, 0);
// The set of currently discovered nodes that are not evaluated yet.
// Initially, only the start node is known.
vector<Node> openSet;
openSet.push_back(initialNode);
// For each node, which node it can most efficiently be reached from.
// If a node can be reached from many nodes, cameFrom will eventually contain the
// most efficient previous step.
// cameFrom := an empty map
// el key es current y el value es parent
map<string, string> cameFrom;
string key = to_string(initialNode.x) + "-" + to_string(initialNode.y);
cameFrom[key] = "START";
bool foundSolution = false;
while(!openSet.empty()) {
// Sorteamos los nodos dependiendo del score
sort(openSet.begin(), openSet.end(), sortQueue);
Node currentNode = openSet.front();
// Checamos si llegamos al goal
if (currentNode.x == finalX && currentNode.y == finalY) {
cout << "solution found" << endl;
foundSolution = true;
ofstream myfile;
myfile.open ("public/solution.txt");
myfile << findPath(currentNode, cameFrom);
myfile.close();
break;
}
move(openSet.begin(), openSet.begin() + 1, back_inserter(closedSet));
openSet.erase(openSet.begin());
expandNode(currentNode, openSet, closedSet, cameFrom, maze, heuristicMatHost);
}
cout << "End of Search" << endl;
}
int main(int argc, char * argv[]) {
//Tenemos por default el nombre del txt
char * mazeText = "public/python-generated-maze.txt";
// Si nos dieron los file names
if(argc == 2) mazeText = argv[1];
// Abrimos el file
FILE* file_ptr = fopen(mazeText, "r");
if(file_ptr == NULL) {
cout << "ERROR : Unable to open file " << endl;
exit(EXIT_FAILURE);
}
// Inicializamos variables
short rows, cols, initialX, initialY, finalX, finalY;
fscanf(file_ptr, "%hu %hu %hu %hu %hu %hu", &rows, &cols, &initialX, &initialY, &finalX, &finalY);
//Iteramos a traves de la matriz para poner los valores
Matrix maze(rows, cols);
for(int i = 0; i < maze.rows; i++) {
for(int j = 0; j < maze.cols; j++) {
fscanf(file_ptr, "%hu", &maze(i, j));
}
}
//COSAS DE CUDA -------------------------------------------
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
SAFE_CALL(hipGetDeviceProperties(&deviceProp, dev), "Error device prop");
printf("Using Device %d: %s\n", dev, deviceProp.name);
SAFE_CALL(hipSetDevice(dev), "Error setting device");
//Bytes
short nxy = rows * cols;
float nBytes = nxy * sizeof(float);
//MALLOC para host matrix
float *heuristicMatHost;
heuristicMatHost = (float *)malloc(nBytes);
//Memset del host matrix
memset(heuristicMatHost, 0, nBytes);
// Malloc and copy memory to device
float *heuristicMat;
SAFE_CALL(hipMalloc((void **)&heuristicMat, nBytes), "Error allocating heuristicMat");
SAFE_CALL(hipMemcpy(heuristicMat, heuristicMatHost, nBytes, hipMemcpyHostToDevice), "Error copying Heuristic Mat to Device");
// invoke kernel at host side
int dimx = 16;
int dimy = 16;
dim3 block(dimx, dimy);
dim3 grid((cols + block.x - 1) / block.x, (rows + block.y - 1) / block.y);
// Mandamos a llamar a hacer la matriz
	hipLaunchKernelGGL(( calcHeuristicOnGPU), dim3(grid), dim3(block), 0, 0, heuristicMat, rows, cols, finalX, finalY); // kernel parameters are (heuristicMat, rows, cols, finalX, finalY)
// SAFE_CALL(hipDeviceSynchronize(), "Error executing kernel");
// SAFE_CALL kernel error
SAFE_CALL(hipGetLastError(), "Error with last error");
// copy kernel result back to host side
SAFE_CALL(hipMemcpy(heuristicMatHost, heuristicMat, nBytes, hipMemcpyDeviceToHost), "Error copying heuristic back to host");
auto start_cpu = chrono::high_resolution_clock::now();
aStarSearch(maze, initialX, initialY, finalX, finalY, heuristicMatHost);
auto end_cpu = chrono::high_resolution_clock::now();
chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
cout << "Time for Astar Search: " << duration_ms.count() << endl;
// free device global memory
SAFE_CALL(hipFree(heuristicMat), "Error freeing memory");
// debug
// debugHeuristicMat(heuristicMatHost, rows, cols);
// free host memory
free(heuristicMatHost);
// reset device
SAFE_CALL(hipDeviceReset(), "Error reseting");
return 0;
}
| 3e141b4a8207ffca2ff21f1385d64166cce3f06a.cu | #include "common.h"
#include <math.h>
#include <iostream>
#include <fstream>
#include <vector>
#include <chrono>
#include <algorithm>
#include <map>
using namespace std;
//Estructura de nodo
struct Node {
short x;
short y;
short cost;
float heuristic;
float score;
Node() {}
Node(short x_, short y_, float heuristic_, short cost_) {
x = x_;
y = y_;
heuristic = heuristic_;
cost = cost_;
score = heuristic_ + cost_;
}
void setNode(short x_, short y_, float heuristic_, short cost_) {
x = x_;
y = y_;
heuristic = heuristic_;
cost = cost_;
score = heuristic_ + cost_;
}
// Le pasamos un nodo y lo copia a sí
void copyNode(Node toCopy) {
x = toCopy.x;
y = toCopy.y;
heuristic = toCopy.heuristic;
cost = toCopy.cost;
score = toCopy.score;
}
void printMe() {
cout << endl << "x: " << x << endl;
cout << "y: " << y << endl;
cout << "heuristic: " << heuristic << endl;
cout << "cost: " << cost << endl;
cout << "score: " << score << endl;
}
void updateCost(short cost_) {
cost = cost_;
score = heuristic + cost_;
}
};
//Estructura de matriz (maze)
struct Matrix {
short rows;
short cols;
vector<short> data;
Matrix(short rows_, short cols_):rows(rows_), cols(cols_), data(rows*cols) { }
short & operator()(size_t row, size_t col) {
return data[row*cols+col];
}
short operator()(size_t row, size_t col) const {
return data[row*cols+col];
}
};
// Funcion para calcular heuristica de la matriz
__global__ void calcHeuristicOnGPU(float *heuristicMat, short rows, short cols, short finalX, short finalY) {
//Codigo de clase
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y;
	if (ix < cols && iy < rows) {
		// cast first: ix and iy are unsigned, so (finalX - ix) would wrap around for negative differences
		heuristicMat[iy*cols+ix] = sqrt(pow((float)finalX - (float)ix, 2) + pow((float)finalY - (float)iy, 2));
	}
}
void debugHeuristicMat(float *heuristicMatHost, short rows, short cols) {
for(int i = 0; i < rows; i++ ) {
for(int j = 0; j < cols; j++ ) {
// cout << "x: " << j << " y: " << i << endl;
cout << "x: " << j << " y: " << i << " heuristic: " << heuristicMatHost[j+i*cols] << endl;
/* cout << "x: " << j << " y: " << i << "heuristic: " << heuristicMatHost[j] << endl; */
}
}
}
//Funcion para hacer el sort
bool sortQueue (Node a, Node b) { return (a.score < b.score); }
string findPath(Node currentNode, map<string, string> &cameFrom) {
string key = to_string(currentNode.x) + "-" + to_string(currentNode.y);
string value = cameFrom[key];
string path = key + "\n";
while (value != "START") {
path += value + "\n";
key = value;
value = cameFrom[key];
}
return path;
}
short isInSet(Node node, vector<Node> &set) {
for (int i = 0; i < set.size(); i++) {
if (node.x == set[i].x && node.y == set[i].y) return i;
}
return -1;
}
void expandNode(Node currentNode, vector<Node> &openSet, vector<Node> &closedSet, map<string, string> &cameFrom, Matrix maze, float *heuristicMatHost) {
vector<Node> tempNodes;
short x = currentNode.x;
short y = currentNode.y;
short cost = currentNode.cost + 1;
// Left
short _x = x - 1;
short _y = y;
if (maze(_y, _x) != 1) tempNodes.push_back(*new Node(_x, _y, heuristicMatHost[_x+_y*maze.cols], cost));
// Right
_x = x + 1;
if (maze(_y, _x) != 1) tempNodes.push_back(*new Node(_x, _y, heuristicMatHost[_x+_y*maze.cols], cost));
// Up
_x = x;
_y = y - 1;
if (maze(_y, _x) != 1) tempNodes.push_back(*new Node(_x, _y, heuristicMatHost[_x+_y*maze.cols], cost));
// Down
_y = y + 1;
if (maze(_y, _x) != 1) tempNodes.push_back(*new Node(_x, _y, heuristicMatHost[_x+_y*maze.cols], cost));
// Checamos cada vecino
for (int i = 0; i < tempNodes.size(); i++) {
// Si está en el closed set, no hacemos nada con el nodo
if (isInSet(tempNodes[i], closedSet) > -1) continue;
short index = isInSet(tempNodes[i], openSet);
if (index == -1) { // Si no está en openSet lo metemos a openSet
openSet.push_back(tempNodes[i]);
} else { // si sí está en openSet, checamos si llegamos con mejor score y lo actualizamos
if (tempNodes[i].score >= currentNode.score) continue;
openSet[index].updateCost(tempNodes[i].cost);
}
string key = to_string(tempNodes[i].x) + "-" + to_string(tempNodes[i].y);
string value = to_string(x) + "-" + to_string(y);
cameFrom[key] = value;
}
}
void aStarSearch(Matrix maze, short initialX, short initialY, short finalX, short finalY, float *heuristicMatHost) {
vector<Node> closedSet; // Set of nodes already evaluated
//Creamos el nodo inicial
float heuristic = sqrt(pow(finalX - initialX, 2) + pow(finalY - initialY, 2));
Node initialNode(initialX, initialY, heuristic, 0);
// The set of currently discovered nodes that are not evaluated yet.
// Initially, only the start node is known.
vector<Node> openSet;
openSet.push_back(initialNode);
// For each node, which node it can most efficiently be reached from.
// If a node can be reached from many nodes, cameFrom will eventually contain the
// most efficient previous step.
// cameFrom := an empty map
// el key es current y el value es parent
map<string, string> cameFrom;
string key = to_string(initialNode.x) + "-" + to_string(initialNode.y);
cameFrom[key] = "START";
bool foundSolution = false;
while(!openSet.empty()) {
// Sorteamos los nodos dependiendo del score
sort(openSet.begin(), openSet.end(), sortQueue);
Node currentNode = openSet.front();
// Checamos si llegamos al goal
if (currentNode.x == finalX && currentNode.y == finalY) {
cout << "solution found" << endl;
foundSolution = true;
ofstream myfile;
myfile.open ("public/solution.txt");
myfile << findPath(currentNode, cameFrom);
myfile.close();
break;
}
move(openSet.begin(), openSet.begin() + 1, back_inserter(closedSet));
openSet.erase(openSet.begin());
expandNode(currentNode, openSet, closedSet, cameFrom, maze, heuristicMatHost);
}
cout << "End of Search" << endl;
}
int main(int argc, char * argv[]) {
//Tenemos por default el nombre del txt
char * mazeText = "public/python-generated-maze.txt";
// Si nos dieron los file names
if(argc == 2) mazeText = argv[1];
// Abrimos el file
FILE* file_ptr = fopen(mazeText, "r");
if(file_ptr == NULL) {
cout << "ERROR : Unable to open file " << endl;
exit(EXIT_FAILURE);
}
// Inicializamos variables
short rows, cols, initialX, initialY, finalX, finalY;
fscanf(file_ptr, "%hu %hu %hu %hu %hu %hu", &rows, &cols, &initialX, &initialY, &finalX, &finalY);
//Iteramos a traves de la matriz para poner los valores
Matrix maze(rows, cols);
for(int i = 0; i < maze.rows; i++) {
for(int j = 0; j < maze.cols; j++) {
fscanf(file_ptr, "%hu", &maze(i, j));
}
}
//COSAS DE CUDA -------------------------------------------
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
SAFE_CALL(cudaGetDeviceProperties(&deviceProp, dev), "Error device prop");
printf("Using Device %d: %s\n", dev, deviceProp.name);
SAFE_CALL(cudaSetDevice(dev), "Error setting device");
//Bytes
short nxy = rows * cols;
float nBytes = nxy * sizeof(float);
//MALLOC para host matrix
float *heuristicMatHost;
heuristicMatHost = (float *)malloc(nBytes);
//Memset del host matrix
memset(heuristicMatHost, 0, nBytes);
// Malloc and copy memory to device
float *heuristicMat;
SAFE_CALL(cudaMalloc((void **)&heuristicMat, nBytes), "Error allocating heuristicMat");
SAFE_CALL(cudaMemcpy(heuristicMat, heuristicMatHost, nBytes, cudaMemcpyHostToDevice), "Error copying Heuristic Mat to Device");
// invoke kernel at host side
int dimx = 16;
int dimy = 16;
dim3 block(dimx, dimy);
dim3 grid((cols + block.x - 1) / block.x, (rows + block.y - 1) / block.y);
// Mandamos a llamar a hacer la matriz
	calcHeuristicOnGPU<<<grid, block>>>(heuristicMat, rows, cols, finalX, finalY); // kernel parameters are (heuristicMat, rows, cols, finalX, finalY)
// SAFE_CALL(cudaDeviceSynchronize(), "Error executing kernel");
// SAFE_CALL kernel error
SAFE_CALL(cudaGetLastError(), "Error with last error");
// copy kernel result back to host side
SAFE_CALL(cudaMemcpy(heuristicMatHost, heuristicMat, nBytes, cudaMemcpyDeviceToHost), "Error copying heuristic back to host");
auto start_cpu = chrono::high_resolution_clock::now();
aStarSearch(maze, initialX, initialY, finalX, finalY, heuristicMatHost);
auto end_cpu = chrono::high_resolution_clock::now();
chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
cout << "Time for Astar Search: " << duration_ms.count() << endl;
// free device global memory
SAFE_CALL(cudaFree(heuristicMat), "Error freeing memory");
// debug
// debugHeuristicMat(heuristicMatHost, rows, cols);
// free host memory
free(heuristicMatHost);
// reset device
SAFE_CALL(cudaDeviceReset(), "Error reseting");
return 0;
}
|
8c75b3f65e3ddd998ed41a5276dca529c22f3ecc.hip | // !!! This is a file automatically generated by hipify!!!
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <string>
#include <vector>
#include <iostream>
using namespace std;
// compares two rotations character by character on the device; without this comparator,
// thrust::sort below would only order the raw char* values by pointer address, not lexicographically
struct RotationLess {
	int length;
	RotationLess(int length_) : length(length_) {}
	__host__ __device__ bool operator()(const char* a, const char* b) const {
		for (int i = 0; i < length; i++) {
			if (a[i] != b[i]) return a[i] < b[i];
		}
		return false;
	}
};
int main() {
string str = "\002banana\003";
vector<string> table;
for (int i = 0; i < str.length(); i++) {
string temp = str.substr(i, str.length()) + str.substr(0, i);
table.push_back(temp);
}
thrust::device_vector<char*> device_table;
for (int i = 0; i < table.size(); i++) {
char* temp;
hipMalloc((void**)&temp, sizeof(char) * (str.length() + 1));
hipMemcpy(temp, table[i].c_str(), sizeof(char) * (str.length() + 1), hipMemcpyHostToDevice);
device_table.push_back(temp);
}
	thrust::sort(device_table.begin(), device_table.end(), RotationLess((int)str.length()));
char* result;
hipHostMalloc((void**)&result, sizeof(char) * (device_table.size() + 1));
for (int i = 0; i < device_table.size(); i++) {
char* temp;
hipHostMalloc((void**)&temp, sizeof(char) * (str.length() + 1));
hipMemcpy(temp, device_table[i], sizeof(char) * (str.length() + 1), hipMemcpyDeviceToHost);
result[i] = temp[str.length() - 1];
hipHostFree(temp);
}
	result[device_table.size()] = '\0'; // terminate the string before printing
	cout << result << endl;
for (int i = 0; i < device_table.size(); i++) {
hipFree(device_table[i]);
}
hipHostFree(result);
return 0;
}
| 8c75b3f65e3ddd998ed41a5276dca529c22f3ecc.cu | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <string>
#include <vector>
#include <iostream>
using namespace std;
// compares two rotations character by character on the device; without this comparator,
// thrust::sort below would only order the raw char* values by pointer address, not lexicographically
struct RotationLess {
	int length;
	RotationLess(int length_) : length(length_) {}
	__host__ __device__ bool operator()(const char* a, const char* b) const {
		for (int i = 0; i < length; i++) {
			if (a[i] != b[i]) return a[i] < b[i];
		}
		return false;
	}
};
int main() {
string str = "\002banana\003";
vector<string> table;
for (int i = 0; i < str.length(); i++) {
string temp = str.substr(i, str.length()) + str.substr(0, i);
table.push_back(temp);
}
thrust::device_vector<char*> device_table;
for (int i = 0; i < table.size(); i++) {
char* temp;
cudaMalloc((void**)&temp, sizeof(char) * (str.length() + 1));
cudaMemcpy(temp, table[i].c_str(), sizeof(char) * (str.length() + 1), cudaMemcpyHostToDevice);
device_table.push_back(temp);
}
	thrust::sort(device_table.begin(), device_table.end(), RotationLess((int)str.length()));
char* result;
cudaMallocHost((void**)&result, sizeof(char) * (device_table.size() + 1));
for (int i = 0; i < device_table.size(); i++) {
char* temp;
cudaMallocHost((void**)&temp, sizeof(char) * (str.length() + 1));
cudaMemcpy(temp, device_table[i], sizeof(char) * (str.length() + 1), cudaMemcpyDeviceToHost);
result[i] = temp[str.length() - 1];
cudaFreeHost(temp);
}
	result[device_table.size()] = '\0'; // terminate the string before printing
	cout << result << endl;
for (int i = 0; i < device_table.size(); i++) {
cudaFree(device_table[i]);
}
cudaFreeHost(result);
return 0;
}
|
95a2d029f8c80c40c19431ede4ecc0355b8119dd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "src/output/gw_output.cuh"
__constant__ letype d_H = 0.0;
__constant__ letype d_a = 0.0;
__constant__ letype d_asr = 0.0;
__constant__ cufft_type* d_f[6];
__constant__ fTT_type* d_tf[6];
__global__ void adjustDerivativeValues(letype* h, letype* hd, const letype meanAcc, const int direction)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int z = blockIdx.z * blockDim.z + threadIdx.z;
hd[index(x, y, z)] -= direction * rescale_r * d_H * (h[index(x, y, z)]);
}
void startAdjustDerivativeValues(letype** h, letype** hd, const int gwfld, const int direction)
{
letype H = ad/a;
hipMemcpyToSymbol(d_H, &H, sizeof(letype));
double asr = pow(a, rescale_s - rescale_r);
hipMemcpyToSymbol(d_a, &a, sizeof(letype));
hipMemcpyToSymbol(d_asr, &asr, sizeof(letype));
const dim3 numBlocks(N / tileSize, N / tileSize, N / tileSize * 4);
const dim3 threadsPerBlock(tileSize, tileSize, tileSize / 4);
if(threadsPerBlock.x * threadsPerBlock.y * threadsPerBlock.z > 1024 || threadsPerBlock.z > 64)
{
printf("%s\n", "ERROR in startAdjustDerivativeValues: Total number of threads in a block may not exceed 1024. The number of threads in the z-direction may not exceed 64\n");
abort();
}
//printf("hd before:\t%.20f, h before:\t%.20f, value:\t%.20f\n", hd[0][index(10, 20, 30)], h[0][index(10, 20, 30)], rescale_r * ad/a);
for(int i = 0; i < 6; i++)
{
hipMemPrefetchAsync(h[i], gridsize * sizeof(letype), 0, NULL);
hipMemPrefetchAsync(hd[i], gridsize * sizeof(letype), 0, NULL);
hipLaunchKernelGGL(( adjustDerivativeValues), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, h[i], hd[i], hmean_acc[gwfld][i], direction);
}
gpuErrchk(hipPeekAtLastError());
hipDeviceSynchronize();
//printf("hd after:\t%.20f, h after:\t%.20f, value:\t%.20f\n", hd[0][index(10, 20, 30)], h[0][index(10, 20, 30)], rescale_r * ad/a);
}
__device__ inline double keff(int i, const int nVals[])
{
return 2.0 * M_PI * nVals[i] / (double)L; // continuum projector
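	// note: the early return above selects the continuum projector; the finite-difference ("modified")
	// projector computed below is currently unreachable and is kept only for reference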
double res = 0.0;
#pragma unroll
for(int k = 1; k <= cfdHaloSize; k++)
res += cu_cfd_stencil_d[cfdHaloSize + k] * sinpi(2.0 * k * nVals[i] / (double)N);
return 2.0 * res / dx; // modified projector
}
__device__ inline double keffabs2(const int nVals[])
{
double res = 0.0;
for(int i = 0; i < NDIMS; i++)
res += keff(i, nVals) * keff(i, nVals);
return res;
}
__device__ inline double P0(int i, int j, const int nVals[])
{
if(nVals[i] == 0 || nVals[i] == N/2 || nVals[i] == -N/2)
if(nVals[j] == 0 || nVals[j] == N/2 || nVals[j] == -N/2)
{
if(i == j)
return 0.0;
return 0.0;
}
double res = 0.0;
if(i == j)
res += 1.0;
res -= keff(i, nVals) * keff(j, nVals) / keffabs2(nVals);
return res;
}
__device__ inline double lam0(int i, int j, int l, int m, const int nVals[])
{
return P0(i, l, nVals) * P0(j, m, nVals) - 0.5 * P0(i, j, nVals) * P0(l, m, nVals);
}
const double traceMargin = 10e-4;
__global__ void testTTProj()
{
const int x = blockIdx.x * blockDim.x + threadIdx.x + 1;
const int y = blockIdx.y * blockDim.y + threadIdx.y + 1;
const int z = blockIdx.z * blockDim.z + threadIdx.z + 1;
const int nVals[3] = {x - N/2, y - N/2, z - N/2};
double val_r = 0.0;
double val_c = 0.0;
//check tracelessness
for(int i = 0; i < 3; i++)
{
val_r += d_tf[indexTensor(i, i)][indexFT(x, y, z)].x;
val_c += d_tf[indexTensor(i, i)][indexFT(x, y, z)].y;
}
if(abs(val_r) > traceMargin || abs(val_c) > traceMargin)
{
printf("ERROR: trace is too large at %d %d %d with values %e %e\n", nVals[0], nVals[1], nVals[2], val_r, val_c);
}
int ii = 0;
int ji = 0;
//check transversality
for(int j = 0; j < 3; j++)
{
val_r = 0.0;
val_c = 0.0;
for(int i = 0; i < 3; i++)
{
ii = i;
ji = j;
if(j > i)
{
ii = j;
ji = i;
}
val_r += keff(i, nVals) * d_tf[indexTensor(ii, ji)][indexFT(x, y, z)].x;
val_c += keff(i, nVals) * d_tf[indexTensor(ii, ji)][indexFT(x, y, z)].y;
}
if(abs(val_r) > traceMargin || abs(val_c) > traceMargin)
{
printf("ERROR: transversality test failed at %d %d %d with values %e %e\n", nVals[0], nVals[1], nVals[2], val_r, val_c);
}
}
//check
for(int j = 0; j < 3; j++)
{
val_r = 0.0;
val_c = 0.0;
for(int i = 0; i < 3; i++)
{
ii = i;
ji = j;
if(j > i)
{
ii = j;
ji = i;
}
val_r = d_tf[indexTensor(ii, ji)][indexFT(x, y, z)].x - d_tf[indexTensor(ii, ji)][indexFT(N - x, N - y, N - z)].x;
val_c = d_tf[indexTensor(ii, ji)][indexFT(x, y, z)].y + d_tf[indexTensor(ii, ji)][indexFT(N - x, N - y, N - z)].y;
if(abs(val_r) > traceMargin || abs(val_c) > traceMargin)
{
printf("ERROR: conjugate test failed at %d %d %d with values %e %e\n", nVals[0], nVals[1], nVals[2], val_r, val_c);
}
}
}
}
void startTestTTProj()
{
const dim3 numBlocks(2 * N / tileSize, 2 * N / tileSize, N / tileSize * 4);
const dim3 threadsPerBlock(tileSize / 2, tileSize / 2, tileSize / 4);
if(threadsPerBlock.x * threadsPerBlock.y * threadsPerBlock.z > 1024 || threadsPerBlock.z > 64)
{
printf("%s\n", "ERROR in startTTProj: Total number of threads in a block may not exceed 1024. The number of threads in the z-direction may not exceed 64\n");
abort();
}
//testTTProj<<<numBlocks, threadsPerBlock>>>();
printf("Sarting the TT projection test\n");
//testTTProj<<<1, 10>>>();
hipLaunchKernelGGL(( testTTProj), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, );
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
}
__global__ void TTProj(cufft_type** f, fTT_type** tf)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x + 1;
const int y = blockIdx.y * blockDim.y + threadIdx.y + 1;
const int z = blockIdx.z * blockDim.z + threadIdx.z + 1;
const int nVals[3] = {x - N/2, y - N/2, z - N/2};
int li = 0;
int mi = 0;
int ii = 0;
int ji = 0;
double tf_temp_r[NDIMS * NDIMS];
double tf_temp_c[NDIMS * NDIMS];
for(int i = 0; i < 3; i++)
for(int j = 0; j < 3; j++)
{
ii = i;
ji = j;
if(j > i)
{
ii = j;
ji = i;
}
tf_temp_r[indexTensor(ii, ji)] = 0.0;
tf_temp_c[indexTensor(ii, ji)] = 0.0;
for(int l = 0; l < 3; l++)
for(int m = 0; m < 3; m++)
{
if(l >= m)
{
li = l;
mi = m;
}
else
{
li = m;
mi = l;
}
tf_temp_r[indexTensor(ii, ji)] += lam0(i, j, l, m, nVals) * f[indexTensor(li, mi)][indexFT(x, y, z)].x;
tf_temp_c[indexTensor(ii, ji)] += lam0(i, j, l, m, nVals) * f[indexTensor(li, mi)][indexFT(x, y, z)].y;
}
tf[indexTensor(ii, ji)][indexFT(x, y, z)].x = tf_temp_r[indexTensor(ii, ji)];
tf[indexTensor(ii, ji)][indexFT(x, y, z)].y = tf_temp_c[indexTensor(ii, ji)];
}
}
void startTTProj(cufft_type** f, fTT_type** tf)
{
gpuErrchk(hipMemcpyToSymbol(d_f, f, 6 * sizeof(cufft_type*)));
gpuErrchk(hipMemcpyToSymbol(d_tf, tf, 6 * sizeof(fTT_type*)));
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
const dim3 numBlocks(2 * N / tileSize, 2 * N / tileSize, N / tileSize * 4);
const dim3 threadsPerBlock(tileSize / 2, tileSize / 2, tileSize / 4);
if(threadsPerBlock.x * threadsPerBlock.y * threadsPerBlock.z > 1024 || threadsPerBlock.z > 64)
{
printf("%s\n", "ERROR in startTTProj: Total number of threads in a block may not exceed 1024. The number of threads in the z-direction may not exceed 64\n");
abort();
}
int device = -1;
hipGetDevice(&device);
const size_t gridsize_ft = (N + 1) * (N + 1) * (N + 1);
for(int fld = 0; fld < 6; fld++)
{
gpuErrchk(hipMemPrefetchAsync(f[fld], gridsize_ft * sizeof(cufft_type), device, NULL));
gpuErrchk(hipMemPrefetchAsync(tf[fld], gridsize_ft * sizeof(fTT_type), device, NULL));
}
hipLaunchKernelGGL(( TTProj), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, f, tf);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
}
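// Accumulate the TT tensors of fields 0..gwnflds-2 into the last slot hdkTT[gwnflds - 1], which holds the total.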
__global__ void addGWFields(int gwnflds, fTT_type*** hdkTT)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x + 1;
const int y = blockIdx.y * blockDim.y + threadIdx.y + 1;
const int z = blockIdx.z * blockDim.z + threadIdx.z + 1;
for(int i = 0; i < 6; i++)
{
hdkTT[gwnflds - 1][i][indexFT(x, y, z)].x = 0.0;
hdkTT[gwnflds - 1][i][indexFT(x, y, z)].y = 0.0;
}
for(int i = 0; i < 6; i++)
{
for(int gwfld = 0; gwfld < gwnflds - 1; gwfld++)
{
hdkTT[gwnflds - 1][i][indexFT(x, y, z)].x += hdkTT[gwfld][i][indexFT(x, y, z)].x;
hdkTT[gwnflds - 1][i][indexFT(x, y, z)].y += hdkTT[gwfld][i][indexFT(x, y, z)].y;
}
}
}
void startAddGWFields(int gwnflds, fTT_type*** hdkTT)
{
const dim3 numBlocks(2 * N / tileSize, 2 * N / tileSize, N / tileSize * 4);
const dim3 threadsPerBlock(tileSize / 2, tileSize / 2, tileSize / 4);
if(threadsPerBlock.x * threadsPerBlock.y * threadsPerBlock.z > 1024 || threadsPerBlock.z > 64)
{
printf("%s\n", "ERROR in startAddGWFields: Total number of threads in a block may not exceed 1024. The number of threads in the z-direction may not exceed 64\n");
abort();
}
hipLaunchKernelGGL(( addGWFields), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, gwnflds, hdkTT);
gpuErrchk(hipPeekAtLastError());
}
95a2d029f8c80c40c19431ede4ecc0355b8119dd.cu
#include "src/output/gw_output.cuh"
__constant__ letype d_H = 0.0;
__constant__ letype d_a = 0.0;
__constant__ letype d_asr = 0.0;
__constant__ cufft_type* d_f[6];
__constant__ fTT_type* d_tf[6];
__global__ void adjustDerivativeValues(letype* h, letype* hd, const letype meanAcc, const int direction)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int z = blockIdx.z * blockDim.z + threadIdx.z;
hd[index(x, y, z)] -= direction * rescale_r * d_H * (h[index(x, y, z)]);
}
void startAdjustDerivativeValues(letype** h, letype** hd, const int gwfld, const int direction)
{
letype H = ad/a;
cudaMemcpyToSymbol(d_H, &H, sizeof(letype));
double asr = pow(a, rescale_s - rescale_r);
cudaMemcpyToSymbol(d_a, &a, sizeof(letype));
cudaMemcpyToSymbol(d_asr, &asr, sizeof(letype));
const dim3 numBlocks(N / tileSize, N / tileSize, N / tileSize * 4);
const dim3 threadsPerBlock(tileSize, tileSize, tileSize / 4);
if(threadsPerBlock.x * threadsPerBlock.y * threadsPerBlock.z > 1024 || threadsPerBlock.z > 64)
{
printf("%s\n", "ERROR in startAdjustDerivativeValues: Total number of threads in a block may not exceed 1024. The number of threads in the z-direction may not exceed 64\n");
abort();
}
//printf("hd before:\t%.20f, h before:\t%.20f, value:\t%.20f\n", hd[0][index(10, 20, 30)], h[0][index(10, 20, 30)], rescale_r * ad/a);
for(int i = 0; i < 6; i++)
{
cudaMemPrefetchAsync(h[i], gridsize * sizeof(letype), 0, NULL);
cudaMemPrefetchAsync(hd[i], gridsize * sizeof(letype), 0, NULL);
adjustDerivativeValues<<<numBlocks, threadsPerBlock>>>(h[i], hd[i], hmean_acc[gwfld][i], direction);
}
gpuErrchk(cudaPeekAtLastError());
cudaDeviceSynchronize();
//printf("hd after:\t%.20f, h after:\t%.20f, value:\t%.20f\n", hd[0][index(10, 20, 30)], h[0][index(10, 20, 30)], rescale_r * ad/a);
}
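// Effective wavenumber entering the projectors. The early return below selects the
// continuum form k_i = 2*pi*n_i/L; the stencil-based (modified) projector computed
// afterwards is unreachable unless that return is removed.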
__device__ inline double keff(int i, const int nVals[])
{
return 2.0 * M_PI * nVals[i] / (double)L; // continuum projector
double res = 0.0;
#pragma unroll
for(int k = 1; k <= cfdHaloSize; k++)
res += cu_cfd_stencil_d[cfdHaloSize + k] * sinpi(2.0 * k * nVals[i] / (double)N);
return 2.0 * res / dx; // modified projector
}
__device__ inline double keffabs2(const int nVals[])
{
double res = 0.0;
for(int i = 0; i < NDIMS; i++)
res += keff(i, nVals) * keff(i, nVals);
return res;
}
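// Transverse projector P_ij(k) = delta_ij - k_i k_j / |k|^2 built from the effective
// wavenumbers; zero and Nyquist modes are projected to zero.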
__device__ inline double P0(int i, int j, const int nVals[])
{
if(nVals[i] == 0 || nVals[i] == N/2 || nVals[i] == -N/2)
if(nVals[j] == 0 || nVals[j] == N/2 || nVals[j] == -N/2)
{
if(i == j)
return 0.0;
return 0.0;
}
double res = 0.0;
if(i == j)
res += 1.0;
res -= keff(i, nVals) * keff(j, nVals) / keffabs2(nVals);
return res;
}
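// Transverse-traceless projector Lambda_ij,lm(k) = P_il P_jm - (1/2) P_ij P_lm.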
__device__ inline double lam0(int i, int j, int l, int m, const int nVals[])
{
return P0(i, l, nVals) * P0(j, m, nVals) - 0.5 * P0(i, j, nVals) * P0(l, m, nVals);
}
const double traceMargin = 10e-4;
__global__ void testTTProj()
{
const int x = blockIdx.x * blockDim.x + threadIdx.x + 1;
const int y = blockIdx.y * blockDim.y + threadIdx.y + 1;
const int z = blockIdx.z * blockDim.z + threadIdx.z + 1;
const int nVals[3] = {x - N/2, y - N/2, z - N/2};
double val_r = 0.0;
double val_c = 0.0;
//check tracelessness
for(int i = 0; i < 3; i++)
{
val_r += d_tf[indexTensor(i, i)][indexFT(x, y, z)].x;
val_c += d_tf[indexTensor(i, i)][indexFT(x, y, z)].y;
}
if(abs(val_r) > traceMargin || abs(val_c) > traceMargin)
{
printf("ERROR: trace is too large at %d %d %d with values %e %e\n", nVals[0], nVals[1], nVals[2], val_r, val_c);
}
int ii = 0;
int ji = 0;
//check transversality
for(int j = 0; j < 3; j++)
{
val_r = 0.0;
val_c = 0.0;
for(int i = 0; i < 3; i++)
{
ii = i;
ji = j;
if(j > i)
{
ii = j;
ji = i;
}
val_r += keff(i, nVals) * d_tf[indexTensor(ii, ji)][indexFT(x, y, z)].x;
val_c += keff(i, nVals) * d_tf[indexTensor(ii, ji)][indexFT(x, y, z)].y;
}
if(abs(val_r) > traceMargin || abs(val_c) > traceMargin)
{
printf("ERROR: transversality test failed at %d %d %d with values %e %e\n", nVals[0], nVals[1], nVals[2], val_r, val_c);
}
}
//check conjugate symmetry (reality condition): F(N-x, N-y, N-z) must equal conj(F(x, y, z))
for(int j = 0; j < 3; j++)
{
val_r = 0.0;
val_c = 0.0;
for(int i = 0; i < 3; i++)
{
ii = i;
ji = j;
if(j > i)
{
ii = j;
ji = i;
}
val_r = d_tf[indexTensor(ii, ji)][indexFT(x, y, z)].x - d_tf[indexTensor(ii, ji)][indexFT(N - x, N - y, N - z)].x;
val_c = d_tf[indexTensor(ii, ji)][indexFT(x, y, z)].y + d_tf[indexTensor(ii, ji)][indexFT(N - x, N - y, N - z)].y;
if(abs(val_r) > traceMargin || abs(val_c) > traceMargin)
{
printf("ERROR: conjugate test failed at %d %d %d with values %e %e\n", nVals[0], nVals[1], nVals[2], val_r, val_c);
}
}
}
}
void startTestTTProj()
{
const dim3 numBlocks(2 * N / tileSize, 2 * N / tileSize, N / tileSize * 4);
const dim3 threadsPerBlock(tileSize / 2, tileSize / 2, tileSize / 4);
if(threadsPerBlock.x * threadsPerBlock.y * threadsPerBlock.z > 1024 || threadsPerBlock.z > 64)
{
printf("%s\n", "ERROR in startTTProj: Total number of threads in a block may not exceed 1024. The number of threads in the z-direction may not exceed 64\n");
abort();
}
//testTTProj<<<numBlocks, threadsPerBlock>>>();
printf("Sarting the TT projection test\n");
//testTTProj<<<1, 10>>>();
testTTProj<<<numBlocks, threadsPerBlock>>>();
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
}
__global__ void TTProj(cufft_type** f, fTT_type** tf)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x + 1;
const int y = blockIdx.y * blockDim.y + threadIdx.y + 1;
const int z = blockIdx.z * blockDim.z + threadIdx.z + 1;
const int nVals[3] = {x - N/2, y - N/2, z - N/2};
int li = 0;
int mi = 0;
int ii = 0;
int ji = 0;
double tf_temp_r[NDIMS * NDIMS];
double tf_temp_c[NDIMS * NDIMS];
for(int i = 0; i < 3; i++)
for(int j = 0; j < 3; j++)
{
ii = i;
ji = j;
if(j > i)
{
ii = j;
ji = i;
}
tf_temp_r[indexTensor(ii, ji)] = 0.0;
tf_temp_c[indexTensor(ii, ji)] = 0.0;
for(int l = 0; l < 3; l++)
for(int m = 0; m < 3; m++)
{
if(l >= m)
{
li = l;
mi = m;
}
else
{
li = m;
mi = l;
}
tf_temp_r[indexTensor(ii, ji)] += lam0(i, j, l, m, nVals) * f[indexTensor(li, mi)][indexFT(x, y, z)].x;
tf_temp_c[indexTensor(ii, ji)] += lam0(i, j, l, m, nVals) * f[indexTensor(li, mi)][indexFT(x, y, z)].y;
}
tf[indexTensor(ii, ji)][indexFT(x, y, z)].x = tf_temp_r[indexTensor(ii, ji)];
tf[indexTensor(ii, ji)][indexFT(x, y, z)].y = tf_temp_c[indexTensor(ii, ji)];
}
}
void startTTProj(cufft_type** f, fTT_type** tf)
{
gpuErrchk(cudaMemcpyToSymbol(d_f, f, 6 * sizeof(cufft_type*)));
gpuErrchk(cudaMemcpyToSymbol(d_tf, tf, 6 * sizeof(fTT_type*)));
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
const dim3 numBlocks(2 * N / tileSize, 2 * N / tileSize, N / tileSize * 4);
const dim3 threadsPerBlock(tileSize / 2, tileSize / 2, tileSize / 4);
if(threadsPerBlock.x * threadsPerBlock.y * threadsPerBlock.z > 1024 || threadsPerBlock.z > 64)
{
printf("%s\n", "ERROR in startTTProj: Total number of threads in a block may not exceed 1024. The number of threads in the z-direction may not exceed 64\n");
abort();
}
int device = -1;
cudaGetDevice(&device);
const size_t gridsize_ft = (N + 1) * (N + 1) * (N + 1);
for(int fld = 0; fld < 6; fld++)
{
gpuErrchk(cudaMemPrefetchAsync(f[fld], gridsize_ft * sizeof(cufft_type), device, NULL));
gpuErrchk(cudaMemPrefetchAsync(tf[fld], gridsize_ft * sizeof(fTT_type), device, NULL));
}
TTProj<<<numBlocks, threadsPerBlock>>>(f, tf);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
}
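// Accumulate the TT tensors of fields 0..gwnflds-2 into the last slot hdkTT[gwnflds - 1], which holds the total.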
__global__ void addGWFields(int gwnflds, fTT_type*** hdkTT)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x + 1;
const int y = blockIdx.y * blockDim.y + threadIdx.y + 1;
const int z = blockIdx.z * blockDim.z + threadIdx.z + 1;
for(int i = 0; i < 6; i++)
{
hdkTT[gwnflds - 1][i][indexFT(x, y, z)].x = 0.0;
hdkTT[gwnflds - 1][i][indexFT(x, y, z)].y = 0.0;
}
for(int i = 0; i < 6; i++)
{
for(int gwfld = 0; gwfld < gwnflds - 1; gwfld++)
{
hdkTT[gwnflds - 1][i][indexFT(x, y, z)].x += hdkTT[gwfld][i][indexFT(x, y, z)].x;
hdkTT[gwnflds - 1][i][indexFT(x, y, z)].y += hdkTT[gwfld][i][indexFT(x, y, z)].y;
}
}
}
void startAddGWFields(int gwnflds, fTT_type*** hdkTT)
{
const dim3 numBlocks(2 * N / tileSize, 2 * N / tileSize, N / tileSize * 4);
const dim3 threadsPerBlock(tileSize / 2, tileSize / 2, tileSize / 4);
if(threadsPerBlock.x * threadsPerBlock.y * threadsPerBlock.z > 1024 || threadsPerBlock.z > 64)
{
printf("%s\n", "ERROR in startAddGWFields: Total number of threads in a block may not exceed 1024. The number of threads in the z-direction may not exceed 64\n");
abort();
}
addGWFields<<<numBlocks, threadsPerBlock>>>(gwnflds, hdkTT);
gpuErrchk(cudaPeekAtLastError());
}
640ab4eee2c0c93b45dc104ec0cd8bdd44124e10.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "roi_pool_f_op.h"
namespace caffe2 {
namespace {
template <typename T>
inline __device__ T gpu_atomic_add(const T val, T* address);
template <>
inline __device__
float gpu_atomic_add(const float val, float* address) {
return atomicAdd(address, val);
}
template <typename T>
__global__ void RoIPoolFForward(const int nthreads, const T* bottom_data,
const T spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const T* bottom_rois, T* top_data, int* argmax_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
int roi_start_w = roundf(offset_bottom_rois[1] * spatial_scale);
int roi_start_h = roundf(offset_bottom_rois[2] * spatial_scale);
int roi_end_w = roundf(offset_bottom_rois[3] * spatial_scale);
int roi_end_h = roundf(offset_bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
T bin_size_h = static_cast<T>(roi_height)
/ static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width)
/ static_cast<T>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<T>(ph)
* bin_size_h));
int wstart = static_cast<int>(floor(static_cast<T>(pw)
* bin_size_w));
int hend = static_cast<int>(ceil(static_cast<T>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(ceil(static_cast<T>(pw + 1)
* bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
T maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
const T* offset_bottom_data =
bottom_data + (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (offset_bottom_data[bottom_index] > maxval) {
maxval = offset_bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
argmax_data[index] = maxidx;
}
}
template <typename T>
__global__ void RoIPoolFBackward(const int nthreads, const T* top_diff,
const int* argmax_data, const int num_rois, const T spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, T* bottom_diff,
const T* bottom_rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
int bottom_offset = (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_top_diff = top_diff + top_offset;
T* offset_bottom_diff = bottom_diff + bottom_offset;
const int* offset_argmax_data = argmax_data + top_offset;
int argmax = offset_argmax_data[ph * pooled_width + pw];
if (argmax != -1) {
gpu_atomic_add(
static_cast<T>(offset_top_diff[ph * pooled_width + pw]),
offset_bottom_diff + argmax);
}
}
}
} // namespace
template<>
bool RoIPoolFOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Input data to pool
auto& R = Input(1); // RoIs
if (R.size() == 0) {
// Handle empty rois
std::vector<int64_t> sizes = {0, X.dim32(1), pooled_height_, pooled_width_};
/* auto* Y = */ Output(0, sizes, at::dtype<float>());
/* auto* A = */ Output(1, sizes, at::dtype<int>());
return true;
}
auto* Y = Output(0, {R.dim32(0), X.dim32(1), pooled_height_, pooled_width_}, at::dtype<float>()); // RoI pooled data
auto* A = Output(1, Y->sizes(), at::dtype<int>()); // argmaxes
int output_size = Y->size();
hipLaunchKernelGGL(( RoIPoolFForward<float>), dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
output_size, X.data<float>(), spatial_scale_, X.dim32(1), X.dim32(2),
X.dim32(3), pooled_height_, pooled_width_, R.data<float>(),
Y->mutable_data<float>(), A->mutable_data<int>());
return true;
}
template<>
bool RoIPoolFGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Input data to pool
auto& R = Input(1); // RoIs
auto& A = Input(2); // argmaxes
auto& dY = Input(3); // Gradient of net w.r.t. output of "forward" op
// (aka "gradOutput")
auto* dX = Output(0, X.sizes(), at::dtype<float>()); // Gradient of net w.r.t. input to "forward" op
// (aka "gradInput")
// Must zero-out dX before accumulating gradients
math::Set<float, CUDAContext>(
dX->size(), 0.f, dX->mutable_data<float>(), &context_);
if (dY.size() > 0) { // Handle possibly empty gradient if there were no rois
hipLaunchKernelGGL(( RoIPoolFBackward<float>), dim3(CAFFE_GET_BLOCKS(dY.size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
dY.size(), dY.data<float>(), A.data<int>(), R.dim32(0), spatial_scale_,
X.dim32(1), X.dim32(2), X.dim32(3), pooled_height_, pooled_width_,
dX->mutable_data<float>(), R.data<float>());
}
return true;
}
REGISTER_CUDA_OPERATOR(RoIPoolF,
RoIPoolFOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(RoIPoolFGradient,
RoIPoolFGradientOp<float, CUDAContext>);
} // namespace caffe2
640ab4eee2c0c93b45dc104ec0cd8bdd44124e10.cu
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "roi_pool_f_op.h"
namespace caffe2 {
namespace {
template <typename T>
inline __device__ T gpu_atomic_add(const T val, T* address);
template <>
inline __device__
float gpu_atomic_add(const float val, float* address) {
return atomicAdd(address, val);
}
template <typename T>
__global__ void RoIPoolFForward(const int nthreads, const T* bottom_data,
const T spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const T* bottom_rois, T* top_data, int* argmax_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
int roi_start_w = roundf(offset_bottom_rois[1] * spatial_scale);
int roi_start_h = roundf(offset_bottom_rois[2] * spatial_scale);
int roi_end_w = roundf(offset_bottom_rois[3] * spatial_scale);
int roi_end_h = roundf(offset_bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
T bin_size_h = static_cast<T>(roi_height)
/ static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width)
/ static_cast<T>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<T>(ph)
* bin_size_h));
int wstart = static_cast<int>(floor(static_cast<T>(pw)
* bin_size_w));
int hend = static_cast<int>(ceil(static_cast<T>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(ceil(static_cast<T>(pw + 1)
* bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
T maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
const T* offset_bottom_data =
bottom_data + (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (offset_bottom_data[bottom_index] > maxval) {
maxval = offset_bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
argmax_data[index] = maxidx;
}
}
template <typename T>
__global__ void RoIPoolFBackward(const int nthreads, const T* top_diff,
const int* argmax_data, const int num_rois, const T spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, T* bottom_diff,
const T* bottom_rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
int bottom_offset = (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_top_diff = top_diff + top_offset;
T* offset_bottom_diff = bottom_diff + bottom_offset;
const int* offset_argmax_data = argmax_data + top_offset;
int argmax = offset_argmax_data[ph * pooled_width + pw];
if (argmax != -1) {
gpu_atomic_add(
static_cast<T>(offset_top_diff[ph * pooled_width + pw]),
offset_bottom_diff + argmax);
}
}
}
} // namespace
template<>
bool RoIPoolFOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Input data to pool
auto& R = Input(1); // RoIs
if (R.size() == 0) {
// Handle empty rois
std::vector<int64_t> sizes = {0, X.dim32(1), pooled_height_, pooled_width_};
/* auto* Y = */ Output(0, sizes, at::dtype<float>());
/* auto* A = */ Output(1, sizes, at::dtype<int>());
return true;
}
auto* Y = Output(0, {R.dim32(0), X.dim32(1), pooled_height_, pooled_width_}, at::dtype<float>()); // RoI pooled data
auto* A = Output(1, Y->sizes(), at::dtype<int>()); // argmaxes
int output_size = Y->size();
RoIPoolFForward<float><<<CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
output_size, X.data<float>(), spatial_scale_, X.dim32(1), X.dim32(2),
X.dim32(3), pooled_height_, pooled_width_, R.data<float>(),
Y->mutable_data<float>(), A->mutable_data<int>());
return true;
}
template<>
bool RoIPoolFGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Input data to pool
auto& R = Input(1); // RoIs
auto& A = Input(2); // argmaxes
auto& dY = Input(3); // Gradient of net w.r.t. output of "forward" op
// (aka "gradOutput")
auto* dX = Output(0, X.sizes(), at::dtype<float>()); // Gradient of net w.r.t. input to "forward" op
// (aka "gradInput")
// Must zero-out dX before accumulating gradients
math::Set<float, CUDAContext>(
dX->size(), 0.f, dX->mutable_data<float>(), &context_);
if (dY.size() > 0) { // Handle possibly empty gradient if there were no rois
RoIPoolFBackward<float><<<CAFFE_GET_BLOCKS(dY.size()),
CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
dY.size(), dY.data<float>(), A.data<int>(), R.dim32(0), spatial_scale_,
X.dim32(1), X.dim32(2), X.dim32(3), pooled_height_, pooled_width_,
dX->mutable_data<float>(), R.data<float>());
}
return true;
}
REGISTER_CUDA_OPERATOR(RoIPoolF,
RoIPoolFOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(RoIPoolFGradient,
RoIPoolFGradientOp<float, CUDAContext>);
} // namespace caffe2
8c3a03dcf8838b2376a9a8a16dd02d0c6810c883.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zlarfx.cu normal z -> s, Fri Sep 11 18:29:21 2015
*/
#include "common_magma.h"
#include "commonblas_s.h"
#include "magma_templates.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
//==============================================================================
__global__
void magma_slarfx_kernel( int m, float *v, float *tau,
float *c, int ldc, float *xnorm,
float *T, int it )
{
if ( !MAGMA_S_EQUAL(*tau, MAGMA_S_ZERO) ) {
const int tx = threadIdx.x;
//float *dc = c + (blockIdx.x-it-1) * ldc;
float *dc = c + (blockIdx.x) * ldc;
__shared__ float sum[ BLOCK_SIZE ];
float lsum;
/* NOTE HERE C is the C at position C(i, 0)
* if blockIdx.x < it it performs the V(i:n,i)' * V(i:n,1:i-1)' used for computing T
* if blockIdx.x > it it performs w := v**H * C */
lsum = MAGMA_S_ZERO;
for (int j = tx; j < m; j += BLOCK_SIZE) {
if (j == 0) {
lsum += MAGMA_S_MUL( MAGMA_S_ONE, dc[j] );
v[j] = MAGMA_S_ONE;
}
else
lsum += MAGMA_S_MUL( MAGMA_S_CNJG( v[j] ), dc[j] );
}
sum[tx] = lsum;
magma_sum_reduce< BLOCK_SIZE >( tx, sum );
/* C := C - v * w */
__syncthreads();
float z__1 = - MAGMA_S_CNJG(*tau) * sum[0];
if (blockIdx.x > it) {
for (int j = m-tx-1; j >= 0; j -= BLOCK_SIZE)
dc[j] += z__1 * v[j];
__syncthreads();
/* Adjust the rest of the column norms */
/*
if (tx == 0) {
float temp = MAGMA_S_ABS( dc[0] ) / xnorm[blockIdx.x-it-1];
temp = (temp + 1.) * (1. - temp);
xnorm[blockIdx.x-it-1] = xnorm[blockIdx.x-it-1] * sqrt(temp);
}
*/
}
else
{
if (blockIdx.x == it)
*(T+it) = *tau;
else
*(T+blockIdx.x) = MAGMA_S_CNJG(z__1);
}
}
else if (blockIdx.x <= it)// in case tau is zero put the corresponding column of T to zero
{
*(T+blockIdx.x) = MAGMA_S_ZERO;
}
}
//==============================================================================
extern "C"
__global__
void magma_strmv_kernel(const float *T, int ldt, float *t)
{
const int tx = threadIdx.x;
T += tx;
__shared__ float tlocal[ BLOCK_SIZE ];
float res = MAGMA_S_MAKE(0., 0.);
tlocal[tx] = t[tx];
__syncthreads();
#pragma unroll
for (int j=0; j < blockDim.x; j++)
res += T[j*ldt]*tlocal[j];
t[tx] = res;
}
extern "C"
__global__
void magma_strmv_kernel2(const float *T, int ldt, float *t,
float *y, float *tau)
{
const int tx = threadIdx.x;
T += blockIdx.x;
__shared__ float sum[ 128 ];
sum[tx] = T[tx*ldt]*t[tx];
magma_sum_reduce_n(blockDim.x, tx, sum);
__syncthreads();
if (tx == 0) {
y[blockIdx.x] = sum[0];
if (blockIdx.x == 0)
y[gridDim.x] = tau[0];
}
}
//==============================================================================
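// One block per column: y[blockIdx.x] = sum_j conj(T(j, blockIdx.x)) * t[j], i.e. y = T**H * t,
// using a shared-memory reduction.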
extern "C"
__global__
void magma_strmv_tkernel(float *T, int ldt, float *t, float *y)
{
const int tx = threadIdx.x;
T += blockIdx.x*ldt;
__shared__ float sum[ 128 ];
sum[tx] = MAGMA_S_CNJG(T[tx])*t[tx];
magma_sum_reduce_n(blockDim.x, tx, sum);
__syncthreads();
if (tx == 0)
y[blockIdx.x] = sum[0];
}
//==============================================================================
/*
Apply a real elementary reflector H to a real M-by-N
matrix C from the left. H is represented in the form
H = I - tau * v * v**H
where tau is a real scalar and v is a real vector.
If tau = 0, then H is taken to be the unit matrix.
To apply H**H (the conjugate transpose of H), supply conjg(tau)
instead of tau.
The norms of v(:, 1:n) are given as input in xnorm(1:n). On exit, the norms
are adjusted to hold the norms of v(2:m,2:n). This differs from LAPACK's
slarf routine.
*/
extern "C" void
magma_slarfx_gpu(
magma_int_t m, magma_int_t n,
magmaFloat_ptr v,
magmaFloat_ptr tau,
magmaFloat_ptr C, magma_int_t ldc,
magmaFloat_ptr xnorm,
magmaFloat_ptr dT, magma_int_t iter,
magmaFloat_ptr work )
{
magma_int_t N = n + iter + 1;
if (iter == 0)
hipLaunchKernelGGL(( magma_slarfx_kernel), dim3(N), dim3(BLOCK_SIZE), 0, magma_stream , m, v, tau, C, ldc, xnorm, dT+iter*N, iter);
else
hipLaunchKernelGGL(( magma_slarfx_kernel), dim3(N), dim3(BLOCK_SIZE), 0, magma_stream , m, v, tau, C, ldc, xnorm, work, iter);
if (iter > 0) {
//magma_strmv_kernel<<< 1, iter, 0, magma_stream >>>( dT, N, dT+iter*N);
hipLaunchKernelGGL(( magma_strmv_kernel2), dim3(iter), dim3(iter), 0, magma_stream , dT, N, work, dT+iter*N, tau);
}
}
//==============================================================================
8c3a03dcf8838b2376a9a8a16dd02d0c6810c883.cu
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zlarfx.cu normal z -> s, Fri Sep 11 18:29:21 2015
*/
#include "common_magma.h"
#include "commonblas_s.h"
#include "magma_templates.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
//==============================================================================
__global__
void magma_slarfx_kernel( int m, float *v, float *tau,
float *c, int ldc, float *xnorm,
float *T, int it )
{
if ( !MAGMA_S_EQUAL(*tau, MAGMA_S_ZERO) ) {
const int tx = threadIdx.x;
//float *dc = c + (blockIdx.x-it-1) * ldc;
float *dc = c + (blockIdx.x) * ldc;
__shared__ float sum[ BLOCK_SIZE ];
float lsum;
/* NOTE HERE C is the C at position C(i, 0)
* if blockIdx.x < it it performs the V(i:n,i)' * V(i:n,1:i-1)' used for computing T
* if blockIdx.x > it it performs w := v**H * C */
lsum = MAGMA_S_ZERO;
for (int j = tx; j < m; j += BLOCK_SIZE) {
if (j == 0) {
lsum += MAGMA_S_MUL( MAGMA_S_ONE, dc[j] );
v[j] = MAGMA_S_ONE;
}
else
lsum += MAGMA_S_MUL( MAGMA_S_CNJG( v[j] ), dc[j] );
}
sum[tx] = lsum;
magma_sum_reduce< BLOCK_SIZE >( tx, sum );
/* C := C - v * w */
__syncthreads();
float z__1 = - MAGMA_S_CNJG(*tau) * sum[0];
if (blockIdx.x > it) {
for (int j = m-tx-1; j >= 0; j -= BLOCK_SIZE)
dc[j] += z__1 * v[j];
__syncthreads();
/* Adjust the rest of the column norms */
/*
if (tx == 0) {
float temp = MAGMA_S_ABS( dc[0] ) / xnorm[blockIdx.x-it-1];
temp = (temp + 1.) * (1. - temp);
xnorm[blockIdx.x-it-1] = xnorm[blockIdx.x-it-1] * sqrt(temp);
}
*/
}
else
{
if (blockIdx.x == it)
*(T+it) = *tau;
else
*(T+blockIdx.x) = MAGMA_S_CNJG(z__1);
}
}
else if (blockIdx.x <= it)// in case tau is zero put the corresponding column of T to zero
{
*(T+blockIdx.x) = MAGMA_S_ZERO;
}
}
//==============================================================================
extern "C"
__global__
void magma_strmv_kernel(const float *T, int ldt, float *t)
{
const int tx = threadIdx.x;
T += tx;
__shared__ float tlocal[ BLOCK_SIZE ];
float res = MAGMA_S_MAKE(0., 0.);
tlocal[tx] = t[tx];
__syncthreads();
#pragma unroll
for (int j=0; j < blockDim.x; j++)
res += T[j*ldt]*tlocal[j];
t[tx] = res;
}
extern "C"
__global__
void magma_strmv_kernel2(const float *T, int ldt, float *t,
float *y, float *tau)
{
const int tx = threadIdx.x;
T += blockIdx.x;
__shared__ float sum[ 128 ];
sum[tx] = T[tx*ldt]*t[tx];
magma_sum_reduce_n(blockDim.x, tx, sum);
__syncthreads();
if (tx == 0) {
y[blockIdx.x] = sum[0];
if (blockIdx.x == 0)
y[gridDim.x] = tau[0];
}
}
//==============================================================================
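// One block per column: y[blockIdx.x] = sum_j conj(T(j, blockIdx.x)) * t[j], i.e. y = T**H * t,
// using a shared-memory reduction.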
extern "C"
__global__
void magma_strmv_tkernel(float *T, int ldt, float *t, float *y)
{
const int tx = threadIdx.x;
T += blockIdx.x*ldt;
__shared__ float sum[ 128 ];
sum[tx] = MAGMA_S_CNJG(T[tx])*t[tx];
magma_sum_reduce_n(blockDim.x, tx, sum);
__syncthreads();
if (tx == 0)
y[blockIdx.x] = sum[0];
}
//==============================================================================
/*
Apply a real elementary reflector H to a real M-by-N
matrix C from the left. H is represented in the form
H = I - tau * v * v**H
where tau is a real scalar and v is a real vector.
If tau = 0, then H is taken to be the unit matrix.
To apply H**H (the conjugate transpose of H), supply conjg(tau)
instead of tau.
The norms of v(:, 1:n) are given as input in xnorm(1:n). On exit, the norms
are adjusted to hold the norms of v(2:m,2:n). This differs from LAPACK's
slarf routine.
*/
extern "C" void
magma_slarfx_gpu(
magma_int_t m, magma_int_t n,
magmaFloat_ptr v,
magmaFloat_ptr tau,
magmaFloat_ptr C, magma_int_t ldc,
magmaFloat_ptr xnorm,
magmaFloat_ptr dT, magma_int_t iter,
magmaFloat_ptr work )
{
magma_int_t N = n + iter + 1;
if (iter == 0)
magma_slarfx_kernel<<< N, BLOCK_SIZE, 0, magma_stream >>>( m, v, tau, C, ldc, xnorm, dT+iter*N, iter);
else
magma_slarfx_kernel<<< N, BLOCK_SIZE, 0, magma_stream >>>( m, v, tau, C, ldc, xnorm, work, iter);
if (iter > 0) {
//magma_strmv_kernel<<< 1, iter, 0, magma_stream >>>( dT, N, dT+iter*N);
magma_strmv_kernel2<<< iter, iter, 0, magma_stream >>>( dT, N, work, dT+iter*N, tau);
}
}
//==============================================================================
31c48c8cd95a4507a87bd44496104eebfc740e89.hip | // !!! This is a file automatically generated by hipify!!!
#include "gpu_util.h"
#include "global.h"
#include "consts.h"
#include "kd-tree.h"
#include "gpu/kd_tree_gpu.h"
#include "gpu/texture_gpu.cu"
#include <vector>
#include <hip/hip_runtime.h>
////////////
unsigned nCurrObjCount = 0;
PrimGpuObj *gpuObjs = NULL;
PrimGpuObj_host *hostObjs = NULL;
unsigned nMaxGpuKdDepth = 0;
kd_node_gpu *hostKdRoot = NULL;
kd_node_gpu *deviceKdRoot = NULL;
unsigned *hostPrimObjIdList = NULL;
unsigned *devicePrimObjIdList = NULL;
bool *deviceKdRecBuf = NULL;
unsigned nMaxKdNodeCount = 0;
unsigned nCurrLightCount = 0;
LightGpu *gpuLights = NULL;
LightCpu *cpuLights = NULL;
// Instant Radiosity
LightGpu *gpuDiffLightsPerLine = NULL;
LightCpu *cpuDiffLightsPerLine = NULL;
float *gpuRdm = NULL;
float *cpuRdm = NULL;
__device__ bool bGpuHasDiffLights = false;
////////////
void gpu_destroy()
{
if(gpuObjs)
{
hipFree(gpuObjs);
gpuObjs = NULL;
}
if(hostObjs)
{
free(hostObjs);
hostObjs = NULL;
}
if(hostKdRoot)
{
free(hostKdRoot);
hostKdRoot = NULL;
}
if(deviceKdRoot)
{
hipFree(deviceKdRoot);
deviceKdRoot = NULL;
}
if(hostPrimObjIdList)
{
free(hostPrimObjIdList);
hostPrimObjIdList = NULL;
}
if(devicePrimObjIdList)
{
hipFree(devicePrimObjIdList);
devicePrimObjIdList = NULL;
}
if(deviceKdRecBuf)
{
hipFree(deviceKdRecBuf);
deviceKdRecBuf = NULL;
}
if(gpuLights)
{
hipFree(gpuLights);
gpuLights = NULL;
}
if(cpuLights)
{
free(cpuLights);
cpuLights = NULL;
}
//
if(gpuDiffLightsPerLine)
{
hipFree(gpuDiffLightsPerLine);
gpuDiffLightsPerLine = NULL;
}
if(cpuDiffLightsPerLine)
{
free(cpuDiffLightsPerLine);
cpuDiffLightsPerLine = NULL;
}
if(gpuRdm)
{
hipFree(gpuRdm);
gpuRdm = NULL;
}
if(cpuRdm)
{
free(cpuRdm);
cpuRdm = NULL;
}
unloadGpuTexture();
}
//////////////////
__constant__ int MaxRayDepth_gpu;
__constant__ float AmbiColor_gpu[3];
__constant__ float epsi_gpu;
__constant__ float fVPLPossibility_gpu;
__constant__ float fVPLIllmThreshold_gpu;
__constant__ float fVPLAtten_gpu;
void sendConstants2GPU()
{
hipError_t err = hipMemcpyToSymbol(MaxRayDepth_gpu, &MaxRayDepth, sizeof(int)/*, 0, hipMemcpyHostToDevice*/);
vect3d ambiColor; scene.getAmbiColor(ambiColor);
err = hipMemcpyToSymbol(AmbiColor_gpu, ambiColor.data, sizeof(float) * 3/*, 0, hipMemcpyHostToDevice*/);
err = hipMemcpyToSymbol(epsi_gpu, &epsi, sizeof(float) /*, 0, hipMemcpyHostToDevice*/);
//
err = hipMemcpyToSymbol(fVPLPossibility_gpu, &fVPLPossibility, sizeof(float) /*, 0, hipMemcpyHostToDevice*/);
err = hipMemcpyToSymbol(fVPLIllmThreshold_gpu, &fVPLIllmThreshold, sizeof(float) /*, 0, hipMemcpyHostToDevice*/);
err = hipMemcpyToSymbol(fVPLAtten_gpu, &fVPLAtten, sizeof(float) /*, 0, hipMemcpyHostToDevice*/);
hipDeviceSynchronize();
}
unsigned getKdTreeDepth(kd_node *pNode, unsigned nCurrLayer = 0)
{
if(pNode->child0 == NULL && pNode->child1 == NULL)
{
return nCurrLayer;
}
unsigned d1 = getKdTreeDepth(pNode->child0, nCurrLayer + 1);
unsigned d2 = getKdTreeDepth(pNode->child1, nCurrLayer + 1);
return (d1 > d2) ? d1 : d2;
}
unsigned getNodeObjCount(kd_node *pKdNode)
{
unsigned nCurrCount = pKdNode->objects.size();
unsigned n0 = 0, n1 = 0;
if(pKdNode->child0)
{
n0 = getNodeObjCount(pKdNode->child0);
}
if(pKdNode->child1)
{
n1 = getNodeObjCount(pKdNode->child1);
}
return (nCurrCount + n0 + n1);
}
unsigned buildKdTreeForGPUObjs(kd_node *pKdNode, std::vector<Object*> &vObjs, unsigned *nIdCount)
{
for(int i = 0; i < vObjs.size(); i ++)
{
pKdNode->addObject(vObjs[i]);
}
pKdNode->updateBBox();
{
// Combine the two factors this way...
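// Fall back to the common kd-tree depth heuristic (~ 8 + 1.3*log N for N objects)
// when no explicit scene/object depths were configured.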
kd_node::nSceneDepth = (nSceneDepth == -1 || nObjDepth == -1) ?
(8 + 1.3 * log(vObjs.size() * 1.f)) : (nSceneDepth + nObjDepth);
printf("[Building KD-tree for Primary Object on GPU of Depth %d...\n", kd_node::nSceneDepth);
pKdNode->split();
}
*nIdCount = getNodeObjCount(pKdNode);
return getKdTreeDepth(pKdNode);
}
static unsigned nCurrInxInx = 0;
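// Flatten the kd-tree into an implicit binary-heap array: the children of the node at
// index i are stored at 2*(i+1)-1 and 2*(i+1). Object ids are appended to one flat list,
// and each node records its start index and count into that list.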
void serialize_kd_tree(kd_node *pNode, kd_node_gpu *hostKdRoot, unsigned *hostPrimObjIdList, unsigned nodeId = 0)
{
if(pNode == NULL) return;
kd_node_gpu *pNodeGpu = hostKdRoot + nodeId;
pNodeGpu->_nDepth = pNode->_nDepth;
pNodeGpu->_bbox._xmin = pNode->_bbox._xmin;
pNodeGpu->_bbox._ymin = pNode->_bbox._ymin;
pNodeGpu->_bbox._zmin = pNode->_bbox._zmin;
pNodeGpu->_bbox._xmax = pNode->_bbox._xmax;
pNodeGpu->_bbox._ymax = pNode->_bbox._ymax;
pNodeGpu->_bbox._zmax = pNode->_bbox._zmax;
pNodeGpu->eAxis = pNode->eAxis;
pNodeGpu->_delim = pNode->_delim;
// assign objects
pNodeGpu->nInxStartInx = nCurrInxInx;
pNodeGpu->nInxCount = pNode->objects.size();
if(pNode->objects.size() > 0)
{
//printf("{(%d) ", nodeId);
for(int i = 0; i < pNodeGpu->nInxCount; i ++)
{
//printf("%d ", pNode->objects[i]->_id);
hostPrimObjIdList[nCurrInxInx] = pNode->objects[i]->_id;
nCurrInxInx ++;
}
//printf(" (%d)} ", pNodeGpu->nInxCount);
}
// Recursive
//
kd_node *pLeft = pNode->child0;
if(pLeft)
{
unsigned lid = (nodeId + 1) * 2 - 1;
pNodeGpu->child0Inx = lid;
serialize_kd_tree(pLeft, hostKdRoot, hostPrimObjIdList, lid);
}
kd_node *pRight = pNode->child1;
if(pRight)
{
unsigned rid = (nodeId + 1) * 2;
pNodeGpu->child1Inx = rid;
serialize_kd_tree(pRight, hostKdRoot, hostPrimObjIdList, rid);
}
}
void copySceneGeomotry()
{
///
/// NOTE: All kinds of geometry live on the CPU side. Since CUDA doesn't support
/// C++ features such as polymorphism, each geometry type has to be handled separately.
///
std::vector<Object*> v4kdtree;
// Count all primary objs
//
unsigned nTotalPrimaryCount = 0;
for(int i = 0; i < scene.getObjectNum(); i ++)
{
Object *pObj = scene.getObject(i);
ObjType eType = pObj->getObjType();
if(eType != OBJ_CPU)
{
if(eType != CUBE_CPU)
{
nTotalPrimaryCount ++;
}
else
{
nTotalPrimaryCount += 6;
}
}
else
{
ObjObject *pObjObj = dynamic_cast<ObjObject*>(pObj); // I know...
nTotalPrimaryCount += pObjObj->getTriCount();
}
}
v4kdtree.reserve(nTotalPrimaryCount);
// 1. Re-Alloc space for Objects
if(gpuObjs)
{
hipFree(gpuObjs);
}
hipError_t err = hipMalloc((void**)&gpuObjs, sizeof(PrimGpuObj) * nTotalPrimaryCount);
if(err != hipSuccess)
{
printf("CUDA error %s, %s, %s \n", __FILE__, __LINE__, hipGetErrorString(err));
}
if(hostObjs)
{
free(hostObjs);
}
hostObjs = (PrimGpuObj_host*)malloc(sizeof(PrimGpuObj_host) * nTotalPrimaryCount);
if(!hostObjs)
{
printf("malloc failed %s, %s \n", __FILE__, __LINE__);
}
// 2. Copy Objects
unsigned nCurrPrimObjInx = 0;
for(int i = 0; i < scene.getObjectNum(); i ++)
{
Object *pObj = scene.getObject(i);
ObjType eType = pObj->getObjType();
if(eType != OBJ_CPU)
{
PrimGpuObj_host *pCurrPrimGpuObj = &hostObjs[nCurrPrimObjInx];
// Copy the common part
pCurrPrimGpuObj->nId = pObj->_id;
pCurrPrimGpuObj->_fReflectionRatio = pObj->getReflectionRatio();
pCurrPrimGpuObj->_fRefractionRatio = pObj->getRefractionRatio();
pCurrPrimGpuObj->_fRefractionK = pObj->getRefractionK();
pCurrPrimGpuObj->_fEmitRatio = pObj->getEmissionRatio();
pCurrPrimGpuObj->_mat.fShininess = pObj->_mat.fShininess;
vecCopy(pCurrPrimGpuObj->_mat.specColor, pObj->_mat.specColor);
vecCopy(pCurrPrimGpuObj->_mat.diffColor, pObj->_mat.diffColor);
vecCopy(pCurrPrimGpuObj->_mat.ambiColor, pObj->_mat.ambiColor);
switch(eType)
{
case SPH_CPU:
{
Sphere *pSph = dynamic_cast<Sphere*>(pObj);
copySphere(pCurrPrimGpuObj, pSph);
v4kdtree.push_back(pSph);
nCurrPrimObjInx ++;
}
break;
case SQU_CPU:
{
Square *pSqu = dynamic_cast<Square*>(pObj);
copySquare(pCurrPrimGpuObj, pSqu);
v4kdtree.push_back(pSqu);
nCurrPrimObjInx ++;
}
break;
case CUBE_CPU:
{
Cube *pCube = dynamic_cast<Cube*>(pObj);
for(int m = 0; m < 6; m ++)
{
pCurrPrimGpuObj = &hostObjs[nCurrPrimObjInx];
copySquare(pCurrPrimGpuObj, pCube->_vs[m]);
v4kdtree.push_back(pCube->_vs[m]);
nCurrPrimObjInx ++;
}
}
break;
default:
printf("not supported obj type \n");
return;
break;
}
}
else
{
ObjObject *pObjObj = dynamic_cast<ObjObject*>(pObj); // I know...
unsigned nCurrTriNum = pObjObj->getTriCount();
for(int j = 0; j < nCurrTriNum; j ++)
{
Triangle *pCurrTri = pObjObj->getTriangle(j);
PrimGpuObj_host *pCurrPrimGpuObj = &hostObjs[nCurrPrimObjInx];
copyTriangle(pCurrPrimGpuObj, pObjObj, pCurrTri);
v4kdtree.push_back(pCurrTri);
nCurrPrimObjInx ++;
}// for
}// else
}// copy for
///
/// Build KD-Tree for GPU only
///
kd_node *pNode = new kd_node;
unsigned nTotalIdCount = 0;
unsigned nMaxDepth = buildKdTreeForGPUObjs(pNode, v4kdtree, &nTotalIdCount);
nMaxGpuKdDepth = nMaxDepth + 1;
// Get ready for the KD-Tree on GPU
//
unsigned nKdTreeNodeCount = pow(2.f, (nMaxDepth + 2) * 1.f) - 1;
nMaxKdNodeCount = nKdTreeNodeCount;
// Kd-tree recursion record buf.
if(deviceKdRecBuf)
{
hipFree(deviceKdRecBuf);
}
err = hipMalloc(&deviceKdRecBuf, sizeof(bool) * nKdTreeNodeCount * WinWidth);
if(err != hipSuccess)
{
printf("CUDA error %s, %s, %s \n", __FILE__, __LINE__, hipGetErrorString(err));
}
// Re-Alloc Kd-tree space
if(hostKdRoot)
{
free(hostKdRoot);
hostKdRoot = NULL;
}
hostKdRoot = (kd_node_gpu *)malloc(sizeof(kd_node_gpu) * nKdTreeNodeCount );
if(!hostKdRoot)
{
printf("malloc failed %s, %s \n", __FILE__, __LINE__);
}
memset(hostKdRoot, 0 ,sizeof(kd_node_gpu) * nKdTreeNodeCount);
for(int i = 0; i < nKdTreeNodeCount; i ++)
{
(hostKdRoot + i)->child0Inx = -1;
(hostKdRoot + i)->child1Inx = -1;
(hostKdRoot + i)->nInxCount = 0;
}
if(deviceKdRoot)
{
hipFree(deviceKdRoot);
deviceKdRoot = NULL;
}
err = hipMalloc(&deviceKdRoot, sizeof(kd_node_gpu) * nKdTreeNodeCount);
if(err != hipSuccess)
{
printf("CUDA error %s, %s, %s \n", __FILE__, __LINE__, hipGetErrorString(err));
}
// Id list buf on GPU
if(hostPrimObjIdList)
{
free(hostPrimObjIdList);
}
hostPrimObjIdList = (unsigned *)malloc(sizeof(unsigned) * nTotalIdCount);// BUG: id could be repeated
if(!hostPrimObjIdList)
{
printf("malloc failed %s, %s \n", __FILE__, __LINE__);
}
if(devicePrimObjIdList)
{
hipFree(devicePrimObjIdList);
}
err = hipMalloc(&devicePrimObjIdList, sizeof(unsigned) * nTotalIdCount);
if(err != hipSuccess)
{
printf("CUDA error %s, %s, %s \n", __FILE__, __LINE__, hipGetErrorString(err));
}
/// Serialize KD-Tree and PrimaryObject Ids
serialize_kd_tree(pNode, hostKdRoot, hostPrimObjIdList);
nCurrInxInx = 0;
// copy KD-tree data from host to device
err = hipMemcpy(deviceKdRoot, hostKdRoot, sizeof(kd_node_gpu) * nKdTreeNodeCount, hipMemcpyHostToDevice);
if(err != hipSuccess)
{
printf("CUDA error %s, %s, %s \n", __FILE__, __LINE__, hipGetErrorString(err));
}
err = hipMemcpy(devicePrimObjIdList, hostPrimObjIdList, sizeof(unsigned) * nTotalIdCount, hipMemcpyHostToDevice);
if(err != hipSuccess)
{
printf("CUDA error %s, %s, %s \n", __FILE__, __LINE__, hipGetErrorString(err));
}
delete pNode;
// cuda copy objs
//
nCurrObjCount = nTotalPrimaryCount;
err = hipMemcpy(gpuObjs, hostObjs, sizeof(PrimGpuObj) * nTotalPrimaryCount, hipMemcpyHostToDevice);
if(err != hipSuccess)
{
printf("CUDA error %s, %s, %s \n", __FILE__, __LINE__, hipGetErrorString(err));
}
hipDeviceSynchronize();
}
void copySceneLights()
{
///
/// NOTE: All kinds of lights live on the CPU side. Since CUDA doesn't support
/// C++ features such as polymorphism, each light type has to be handled separately.
///
// 1. Count Lights
//
unsigned nLightCount = scene.getLightNum();
// re-alloc space
if(gpuLights)
{
hipFree(gpuLights);
}
hipError_t err = hipMalloc(&gpuLights, sizeof(LightGpu) * nLightCount);
if(err != hipSuccess)
{
printf("CUDA error %s, %s, %s \n", __FILE__, __LINE__, hipGetErrorString(err));
}
if(cpuLights)
{
free(cpuLights);
}
cpuLights = (LightCpu*)malloc(sizeof(LightCpu) * nLightCount);
if(!cpuLights)
{
printf("malloc failed %s, %s \n", __FILE__, __LINE__);
}
// copy to host
for(int i = 0; i < nLightCount; i ++)
{
Light *pLight = scene.getLight(i);
LightType eType = pLight->getType();
LightCpu *pCurrCpuLight = cpuLights + i;
pCurrCpuLight->eType = eType;
// common
pCurrCpuLight->_fAttenuate = pLight->_fAttenuate;
vecCopy(pCurrCpuLight->_ambientColor, pLight->_ambientColor);
vecCopy(pCurrCpuLight->_diffuseColor, pLight->_diffuseColor);
vecCopy(pCurrCpuLight->_specularColor, pLight->_specularColor);
switch(eType)
{
case OMNI_P:
{
OmniPointLight *pOPl = dynamic_cast<OmniPointLight*>(pLight);
vecCopy(pCurrCpuLight->_omni_pos, pOPl->_pos);
}
break;
case DIR_P:
{
DirPointLight *pDPl = dynamic_cast<DirPointLight*>(pLight);
vecCopy(pCurrCpuLight->_dirp_pos, pDPl->_pos);
vecCopy(pCurrCpuLight->_dirp_dir, pDPl->_dir);
}
break;
case DIR:
{
DirLight *pDl = dynamic_cast<DirLight*>(pLight);
vecCopy(pCurrCpuLight->_dir_dir, pDl->_dir);
}
break;
}
}
// copy to gpu
//
nCurrLightCount = nLightCount;
hipMemcpy(gpuLights, cpuLights, sizeof(LightGpu) * nLightCount, hipMemcpyHostToDevice);
// Alloc Diffuse Lights space
//
err = hipMalloc(&gpuDiffLightsPerLine, sizeof(LightCpu) * WinWidth * MAX_RAY_COUNT_PER_TREE);
if(err != hipSuccess)
{
printf("CUDA error %s, %s, %s \n", __FILE__, __LINE__, hipGetErrorString(err));
}
cpuDiffLightsPerLine = (LightCpu *)malloc(sizeof(LightGpu) * WinWidth * MAX_RAY_COUNT_PER_TREE);
if(!cpuDiffLightsPerLine)
{
printf("malloc failed %s, %s \n", __FILE__, __LINE__);
}
// Random number
err = hipMalloc(&gpuRdm, sizeof(float) * WinWidth * MAX_RAY_COUNT_PER_TREE);
if(err != hipSuccess)
{
printf("CUDA error %s, %s, %s \n", __FILE__, __LINE__, hipGetErrorString(err));
}
cpuRdm = (float *)malloc(sizeof(float) * WinWidth * MAX_RAY_COUNT_PER_TREE);
if(!cpuRdm)
{
printf("malloc failed %s, %s \n", __FILE__, __LINE__);
}
for(int i = 0; i < WinWidth ; i ++)
for(int j = 0; j < MAX_RAY_COUNT_PER_TREE; j ++)
{
*(cpuRdm + j + i * MAX_RAY_COUNT_PER_TREE) = (rand() % 1000000) / 1000000.f;
}
hipMemcpy(gpuRdm, cpuRdm, sizeof(float) * WinWidth * MAX_RAY_COUNT_PER_TREE, hipMemcpyHostToDevice);
hipDeviceSynchronize();
}
void releaseSceneGeomotry()
{
nCurrObjCount = 0;
if(gpuObjs)
{
hipFree(gpuObjs);
gpuObjs = NULL;
}
if(hostObjs)
{
free(hostObjs);
hostObjs = NULL;
}
if(deviceKdRecBuf)
{
hipFree(deviceKdRecBuf);
deviceKdRecBuf = NULL;
}
if(hostKdRoot)
{
free(hostKdRoot);
hostKdRoot = NULL;
}
if(deviceKdRoot)
{
hipFree(deviceKdRoot);
deviceKdRoot = NULL;
}
if(hostPrimObjIdList)
{
free(hostPrimObjIdList);
hostPrimObjIdList = NULL;
}
if(devicePrimObjIdList)
{
hipFree(devicePrimObjIdList);
devicePrimObjIdList = NULL;
}
//
if(gpuDiffLightsPerLine)
{
hipFree(gpuDiffLightsPerLine);
gpuDiffLightsPerLine = NULL;
}
if(cpuDiffLightsPerLine)
{
free(cpuDiffLightsPerLine);
cpuDiffLightsPerLine = NULL;
}
}
void releaseSceneLights()
{
nCurrLightCount = 0;
if(gpuLights)
{
hipFree(gpuLights);
gpuLights = NULL;
}
if(cpuLights)
{
free(cpuLights);
cpuLights = NULL;
}
}
static void copyTriangle(PrimGpuObj_host *pCurrPrimGpuObj, ObjObject *pObjObj, Triangle *pCurrTri)
{
pCurrPrimGpuObj->eType = TRI_GPU;
pCurrPrimGpuObj->nId = pCurrTri->_id;
pCurrPrimGpuObj->_fReflectionRatio = pObjObj->getReflectionRatio();
pCurrPrimGpuObj->_fRefractionRatio = pObjObj->getRefractionRatio();
pCurrPrimGpuObj->_fRefractionK = pObjObj->getRefractionK();
pCurrPrimGpuObj->_fEmitRatio = pObjObj->getEmissionRatio();
pCurrPrimGpuObj->_mat.fShininess = pObjObj->_mat.fShininess;
vecCopy(pCurrPrimGpuObj->_mat.specColor, pObjObj->_mat.specColor);
vecCopy(pCurrPrimGpuObj->_mat.diffColor, pObjObj->_mat.diffColor);
vecCopy(pCurrPrimGpuObj->_mat.ambiColor, pObjObj->_mat.ambiColor);
for(int n = 0; n < 3; n ++)
{
vecCopy(pCurrPrimGpuObj->_vertices[n], pCurrTri->_vertices[n]);
vecCopy(pCurrPrimGpuObj->_vnormal[n], pCurrTri->_vnormal[n]);
}
vecCopy(pCurrPrimGpuObj->_normal, pCurrTri->_normal);
pCurrPrimGpuObj->_bSmooth = pObjObj->_bSmooth;
pCurrPrimGpuObj->_bHasVNorm = pObjObj->_bHasVNorm;
// NOTE: not supported yet
pCurrPrimGpuObj->pTex = NULL;
}
static void copySquare(PrimGpuObj_host *pCurrPrimGpuObj, Square *pSqu)
{
pCurrPrimGpuObj->eType = SQU_GPU;
pCurrPrimGpuObj->nId = pSqu->_id;
vecCopy(pCurrPrimGpuObj->_vNormal, pSqu->_vNormal);
vecCopy(pCurrPrimGpuObj->_vWidthVec, pSqu->_vWidthVec);
vecCopy(pCurrPrimGpuObj->_vCenter, pSqu->_vCenter);
pCurrPrimGpuObj->_nWidth = pSqu->_nWidth;
pCurrPrimGpuObj->_nHeight = pSqu->_nHeight;
vecCopy(pCurrPrimGpuObj->_v2HeightVec, pSqu->_v2HeightVec);
vecCopy(pCurrPrimGpuObj->_v2WidthVec, pSqu->_v2WidthVec);
pCurrPrimGpuObj->a = pSqu->a;
pCurrPrimGpuObj->b = pSqu->b;
pCurrPrimGpuObj->c = pSqu->c;
pCurrPrimGpuObj->d = pSqu->d;
// Tex
if(pSqu->_tex)
{
int nMipInx = 0;
int texId = TextureManager::find(pSqu->_tex, &nMipInx);
if(texId != -1)
{
pCurrPrimGpuObj->pTex = pTexGpu + texId * MAX_GPU_MIPMAP_COUNT + nMipInx;
pCurrPrimGpuObj->eTexType = pSqu->_eTexMapType;
}
}
else
{
pCurrPrimGpuObj->pTex = NULL;
}
}
static void copySphere(PrimGpuObj_host *pCurrPrimGpuObj, Sphere *pSph)
{
pCurrPrimGpuObj->nId = pSph->_id;
pCurrPrimGpuObj->eType = SPH_GPU;
pCurrPrimGpuObj->_fRad = pSph->_fRad;
vecCopy(pCurrPrimGpuObj->_ctr, pSph->_ctr);
// Tex
int nMipInx = 0;
int texId = TextureManager::find(pSph->_tex, &nMipInx);
if(texId != -1)
{
pCurrPrimGpuObj->pTex = pTexGpu + texId * MAX_GPU_MIPMAP_COUNT + nMipInx;
pCurrPrimGpuObj->eTexType = pSph->_eTexMapType;
}
else
{
pCurrPrimGpuObj->pTex = NULL;
}
}
31c48c8cd95a4507a87bd44496104eebfc740e89.cu
#include "gpu_util.h"
#include "global.h"
#include "consts.h"
#include "kd-tree.h"
#include "gpu/kd_tree_gpu.h"
#include "gpu/texture_gpu.cu"
#include <vector>
#include <cuda_runtime.h>
////////////
unsigned nCurrObjCount = 0;
PrimGpuObj *gpuObjs = NULL;
PrimGpuObj_host *hostObjs = NULL;
unsigned nMaxGpuKdDepth = 0;
kd_node_gpu *hostKdRoot = NULL;
kd_node_gpu *deviceKdRoot = NULL;
unsigned *hostPrimObjIdList = NULL;
unsigned *devicePrimObjIdList = NULL;
bool *deviceKdRecBuf = NULL;
unsigned nMaxKdNodeCount = 0;
unsigned nCurrLightCount = 0;
LightGpu *gpuLights = NULL;
LightCpu *cpuLights = NULL;
// Instant Radiosity
LightGpu *gpuDiffLightsPerLine = NULL;
LightCpu *cpuDiffLightsPerLine = NULL;
float *gpuRdm = NULL;
float *cpuRdm = NULL;
__device__ bool bGpuHasDiffLights = false;
////////////
void gpu_destroy()
{
if(gpuObjs)
{
cudaFree(gpuObjs);
gpuObjs = NULL;
}
if(hostObjs)
{
free(hostObjs);
hostObjs = NULL;
}
if(hostKdRoot)
{
free(hostKdRoot);
hostKdRoot = NULL;
}
if(deviceKdRoot)
{
cudaFree(deviceKdRoot);
deviceKdRoot = NULL;
}
if(hostPrimObjIdList)
{
free(hostPrimObjIdList);
hostPrimObjIdList = NULL;
}
if(devicePrimObjIdList)
{
cudaFree(devicePrimObjIdList);
devicePrimObjIdList = NULL;
}
if(deviceKdRecBuf)
{
cudaFree(deviceKdRecBuf);
deviceKdRecBuf = NULL;
}
if(gpuLights)
{
cudaFree(gpuLights);
gpuLights = NULL;
}
if(cpuLights)
{
free(cpuLights);
cpuLights = NULL;
}
//
if(gpuDiffLightsPerLine)
{
cudaFree(gpuDiffLightsPerLine);
gpuDiffLightsPerLine = NULL;
}
if(cpuDiffLightsPerLine)
{
free(cpuDiffLightsPerLine);
cpuDiffLightsPerLine = NULL;
}
if(gpuRdm)
{
cudaFree(gpuRdm);
gpuRdm = NULL;
}
if(cpuRdm)
{
free(cpuRdm);
cpuRdm = NULL;
}
unloadGpuTexture();
}
//////////////////
__constant__ int MaxRayDepth_gpu;
__constant__ float AmbiColor_gpu[3];
__constant__ float epsi_gpu;
__constant__ float fVPLPossibility_gpu;
__constant__ float fVPLIllmThreshold_gpu;
__constant__ float fVPLAtten_gpu;
void sendConstants2GPU()
{
cudaError_t err = cudaMemcpyToSymbol(MaxRayDepth_gpu, &MaxRayDepth, sizeof(int)/*, 0, cudaMemcpyHostToDevice*/);
vect3d ambiColor; scene.getAmbiColor(ambiColor);
err = cudaMemcpyToSymbol(AmbiColor_gpu, ambiColor.data, sizeof(float) * 3/*, 0, cudaMemcpyHostToDevice*/);
err = cudaMemcpyToSymbol(epsi_gpu, &epsi, sizeof(float) /*, 0, cudaMemcpyHostToDevice*/);
//
err = cudaMemcpyToSymbol(fVPLPossibility_gpu, &fVPLPossibility, sizeof(float) /*, 0, cudaMemcpyHostToDevice*/);
err = cudaMemcpyToSymbol(fVPLIllmThreshold_gpu, &fVPLIllmThreshold, sizeof(float) /*, 0, cudaMemcpyHostToDevice*/);
err = cudaMemcpyToSymbol(fVPLAtten_gpu, &fVPLAtten, sizeof(float) /*, 0, cudaMemcpyHostToDevice*/);
cudaThreadSynchronize();
}
unsigned getKdTreeDepth(kd_node *pNode, unsigned nCurrLayer = 0)
{
if(pNode->child0 == NULL && pNode->child1 == NULL)
{
return nCurrLayer;
}
unsigned d1 = getKdTreeDepth(pNode->child0, nCurrLayer + 1);
unsigned d2 = getKdTreeDepth(pNode->child1, nCurrLayer + 1);
return (d1 > d2) ? d1 : d2;
}
unsigned getNodeObjCount(kd_node *pKdNode)
{
unsigned nCurrCount = pKdNode->objects.size();
unsigned n0 = 0, n1 = 0;
if(pKdNode->child0)
{
n0 = getNodeObjCount(pKdNode->child0);
}
if(pKdNode->child1)
{
n1 = getNodeObjCount(pKdNode->child1);
}
return (nCurrCount + n0 + n1);
}
unsigned buildKdTreeForGPUObjs(kd_node *pKdNode, std::vector<Object*> &vObjs, unsigned *nIdCount)
{
for(int i = 0; i < vObjs.size(); i ++)
{
pKdNode->addObject(vObjs[i]);
}
pKdNode->updateBBox();
{
// Combine the two factors this way...
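// Fall back to the common kd-tree depth heuristic (~ 8 + 1.3*log N for N objects)
// when no explicit scene/object depths were configured.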
kd_node::nSceneDepth = (nSceneDepth == -1 || nObjDepth == -1) ?
(8 + 1.3 * log(vObjs.size() * 1.f)) : (nSceneDepth + nObjDepth);
printf("[Building KD-tree for Primary Object on GPU of Depth %d...\n", kd_node::nSceneDepth);
pKdNode->split();
}
*nIdCount = getNodeObjCount(pKdNode);
return getKdTreeDepth(pKdNode);
}
static unsigned nCurrInxInx = 0;
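// Flatten the kd-tree into an implicit binary-heap array: the children of the node at
// index i are stored at 2*(i+1)-1 and 2*(i+1). Object ids are appended to one flat list,
// and each node records its start index and count into that list.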
void serialize_kd_tree(kd_node *pNode, kd_node_gpu *hostKdRoot, unsigned *hostPrimObjIdList, unsigned nodeId = 0)
{
if(pNode == NULL) return;
kd_node_gpu *pNodeGpu = hostKdRoot + nodeId;
pNodeGpu->_nDepth = pNode->_nDepth;
pNodeGpu->_bbox._xmin = pNode->_bbox._xmin;
pNodeGpu->_bbox._ymin = pNode->_bbox._ymin;
pNodeGpu->_bbox._zmin = pNode->_bbox._zmin;
pNodeGpu->_bbox._xmax = pNode->_bbox._xmax;
pNodeGpu->_bbox._ymax = pNode->_bbox._ymax;
pNodeGpu->_bbox._zmax = pNode->_bbox._zmax;
pNodeGpu->eAxis = pNode->eAxis;
pNodeGpu->_delim = pNode->_delim;
// assign objects
pNodeGpu->nInxStartInx = nCurrInxInx;
pNodeGpu->nInxCount = pNode->objects.size();
if(pNode->objects.size() > 0)
{
//printf("{(%d) ", nodeId);
for(int i = 0; i < pNodeGpu->nInxCount; i ++)
{
//printf("%d ", pNode->objects[i]->_id);
hostPrimObjIdList[nCurrInxInx] = pNode->objects[i]->_id;
nCurrInxInx ++;
}
//printf(" (%d)} ", pNodeGpu->nInxCount);
}
// Recursive
//
kd_node *pLeft = pNode->child0;
if(pLeft)
{
unsigned lid = (nodeId + 1) * 2 - 1;
pNodeGpu->child0Inx = lid;
serialize_kd_tree(pLeft, hostKdRoot, hostPrimObjIdList, lid);
}
kd_node *pRight = pNode->child1;
if(pRight)
{
unsigned rid = (nodeId + 1) * 2;
pNodeGpu->child1Inx = rid;
serialize_kd_tree(pRight, hostKdRoot, hostPrimObjIdList, rid);
}
}
void copySceneGeomotry()
{
///
/// NOTE: All kinds of geometry live on the CPU side. Since CUDA doesn't support
/// C++ features such as polymorphism, each geometry type has to be handled separately.
///
std::vector<Object*> v4kdtree;
// Count all primary objs
//
unsigned nTotalPrimaryCount = 0;
for(int i = 0; i < scene.getObjectNum(); i ++)
{
Object *pObj = scene.getObject(i);
ObjType eType = pObj->getObjType();
if(eType != OBJ_CPU)
{
if(eType != CUBE_CPU)
{
nTotalPrimaryCount ++;
}
else
{
nTotalPrimaryCount += 6;
}
}
else
{
ObjObject *pObjObj = dynamic_cast<ObjObject*>(pObj); // I know...
nTotalPrimaryCount += pObjObj->getTriCount();
}
}
v4kdtree.reserve(nTotalPrimaryCount);
// 1. Re-Alloc space for Objects
if(gpuObjs)
{
cudaFree(gpuObjs);
}
cudaError_t err = cudaMalloc((void**)&gpuObjs, sizeof(PrimGpuObj) * nTotalPrimaryCount);
if(err != cudaSuccess)
{
printf("CUDA error %s, %s, %s \n", __FILE__, __LINE__, cudaGetErrorString(err));
}
if(hostObjs)
{
free(hostObjs);
}
hostObjs = (PrimGpuObj_host*)malloc(sizeof(PrimGpuObj_host) * nTotalPrimaryCount);
if(!hostObjs)
{
printf("malloc failed %s, %s \n", __FILE__, __LINE__);
}
// 2. Copy Objects
unsigned nCurrPrimObjInx = 0;
for(int i = 0; i < scene.getObjectNum(); i ++)
{
Object *pObj = scene.getObject(i);
ObjType eType = pObj->getObjType();
if(eType != OBJ_CPU)
{
PrimGpuObj_host *pCurrPrimGpuObj = &hostObjs[nCurrPrimObjInx];
// Copy the common part
pCurrPrimGpuObj->nId = pObj->_id;
pCurrPrimGpuObj->_fReflectionRatio = pObj->getReflectionRatio();
pCurrPrimGpuObj->_fRefractionRatio = pObj->getRefractionRatio();
pCurrPrimGpuObj->_fRefractionK = pObj->getRefractionK();
pCurrPrimGpuObj->_fEmitRatio = pObj->getEmissionRatio();
pCurrPrimGpuObj->_mat.fShininess = pObj->_mat.fShininess;
vecCopy(pCurrPrimGpuObj->_mat.specColor, pObj->_mat.specColor);
vecCopy(pCurrPrimGpuObj->_mat.diffColor, pObj->_mat.diffColor);
vecCopy(pCurrPrimGpuObj->_mat.ambiColor, pObj->_mat.ambiColor);
switch(eType)
{
case SPH_CPU:
{
Sphere *pSph = dynamic_cast<Sphere*>(pObj);
copySphere(pCurrPrimGpuObj, pSph);
v4kdtree.push_back(pSph);
nCurrPrimObjInx ++;
}
break;
case SQU_CPU:
{
Square *pSqu = dynamic_cast<Square*>(pObj);
copySquare(pCurrPrimGpuObj, pSqu);
v4kdtree.push_back(pSqu);
nCurrPrimObjInx ++;
}
break;
case CUBE_CPU:
{
Cube *pCube = dynamic_cast<Cube*>(pObj);
for(int m = 0; m < 6; m ++)
{
pCurrPrimGpuObj = &hostObjs[nCurrPrimObjInx];
copySquare(pCurrPrimGpuObj, pCube->_vs[m]);
v4kdtree.push_back(pCube->_vs[m]);
nCurrPrimObjInx ++;
}
}
break;
default:
printf("not supported obj type \n");
return;
break;
}
}
else
{
ObjObject *pObjObj = dynamic_cast<ObjObject*>(pObj); // I know...
unsigned nCurrTriNum = pObjObj->getTriCount();
for(int j = 0; j < nCurrTriNum; j ++)
{
Triangle *pCurrTri = pObjObj->getTriangle(j);
PrimGpuObj_host *pCurrPrimGpuObj = &hostObjs[nCurrPrimObjInx];
copyTriangle(pCurrPrimGpuObj, pObjObj, pCurrTri);
v4kdtree.push_back(pCurrTri);
nCurrPrimObjInx ++;
}// for
}// else
}// copy for
///
/// Build KD-Tree for GPU only
///
kd_node *pNode = new kd_node;
unsigned nTotalIdCount = 0;
unsigned nMaxDepth = buildKdTreeForGPUObjs(pNode, v4kdtree, &nTotalIdCount);
nMaxGpuKdDepth = nMaxDepth + 1;
// Get ready for the KD-Tree on GPU
//
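// The serialized tree uses positional (complete binary tree) indexing, so reserve a
// slot for every possible node covering the measured depth (2^(nMaxDepth + 2) - 1
// nodes); sparse children still consume their positional slots.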
unsigned nKdTreeNodeCount = pow(2.f, (nMaxDepth + 2) * 1.f) - 1;
nMaxKdNodeCount = nKdTreeNodeCount;
// Kd-tree recursion record buf.
if(deviceKdRecBuf)
{
cudaFree(deviceKdRecBuf);
}
err = cudaMalloc(&deviceKdRecBuf, sizeof(bool) * nKdTreeNodeCount * WinWidth);
if(err != cudaSuccess)
{
printf("CUDA error %s, %s, %s \n", __FILE__, __LINE__, cudaGetErrorString(err));
}
// Re-Alloc Kd-tree space
if(hostKdRoot)
{
free(hostKdRoot);
hostKdRoot = NULL;
}
hostKdRoot = (kd_node_gpu *)malloc(sizeof(kd_node_gpu) * nKdTreeNodeCount );
if(!hostKdRoot)
{
printf("malloc failed %s, %s \n", __FILE__, __LINE__);
}
memset(hostKdRoot, 0, sizeof(kd_node_gpu) * nKdTreeNodeCount);
for(int i = 0; i < nKdTreeNodeCount; i ++)
{
(hostKdRoot + i)->child0Inx = -1;
(hostKdRoot + i)->child1Inx = -1;
(hostKdRoot + i)->nInxCount = 0;
}
if(deviceKdRoot)
{
cudaFree(deviceKdRoot);
deviceKdRoot = NULL;
}
err = cudaMalloc(&deviceKdRoot, sizeof(kd_node_gpu) * nKdTreeNodeCount);
if(err != cudaSuccess)
{
printf("CUDA error %s, %s, %s \n", __FILE__, __LINE__, cudaGetErrorString(err));
}
// Id list buf on GPU
if(hostPrimObjIdList)
{
free(hostPrimObjIdList);
}
hostPrimObjIdList = (unsigned *)malloc(sizeof(unsigned) * nTotalIdCount);// BUG: id could be repeated
if(!hostPrimObjIdList)
{
printf("malloc failed %s, %s \n", __FILE__, __LINE__);
}
if(devicePrimObjIdList)
{
cudaFree(devicePrimObjIdList);
}
err = cudaMalloc(&devicePrimObjIdList, sizeof(unsigned) * nTotalIdCount);
if(err != cudaSuccess)
{
printf("CUDA error %s, %s, %s \n", __FILE__, __LINE__, cudaGetErrorString(err));
}
/// Serialize KD-Tree and PrimaryObject Ids
serialize_kd_tree(pNode, hostKdRoot, hostPrimObjIdList);
nCurrInxInx = 0;
// copy KD-tree data from host to device
err = cudaMemcpy(deviceKdRoot, hostKdRoot, sizeof(kd_node_gpu) * nKdTreeNodeCount, cudaMemcpyHostToDevice);
if(err != cudaSuccess)
{
printf("CUDA error %s, %s, %s \n", __FILE__, __LINE__, cudaGetErrorString(err));
}
err = cudaMemcpy(devicePrimObjIdList, hostPrimObjIdList, sizeof(unsigned) * nTotalIdCount, cudaMemcpyHostToDevice);
if(err != cudaSuccess)
{
printf("CUDA error %s, %s, %s \n", __FILE__, __LINE__, cudaGetErrorString(err));
}
delete pNode;
// cuda copy objs
//
nCurrObjCount = nTotalPrimaryCount;
err = cudaMemcpy(gpuObjs, hostObjs, sizeof(PrimGpuObj) * nTotalPrimaryCount, cudaMemcpyHostToDevice);
if(err != cudaSuccess)
{
printf("CUDA error %s, %s, %s \n", __FILE__, __LINE__, cudaGetErrorString(err));
}
cudaDeviceSynchronize();
}
void copySceneLights()
{
///
/// NOTE: All kinds of lights live on the CPU side. Since CUDA doesn't support
/// C++ features such as polymorphism, they have to be handled separately.
///
// 1. Count Lights
//
unsigned nLightCount = scene.getLightNum();
// re-alloc space
if(gpuLights)
{
cudaFree(gpuLights);
}
cudaError_t err = cudaMalloc(&gpuLights, sizeof(LightGpu) * nLightCount);
if(err != cudaSuccess)
{
printf("CUDA error %s, %s, %s \n", __FILE__, __LINE__, cudaGetErrorString(err));
}
if(cpuLights)
{
free(cpuLights);
}
cpuLights = (LightCpu*)malloc(sizeof(LightCpu) * nLightCount);
if(!cpuLights)
{
printf("malloc failed %s, %s \n", __FILE__, __LINE__);
}
// copy to host
for(int i = 0; i < nLightCount; i ++)
{
Light *pLight = scene.getLight(i);
LightType eType = pLight->getType();
LightCpu *pCurrCpuLight = cpuLights + i;
pCurrCpuLight->eType = eType;
// common
pCurrCpuLight->_fAttenuate = pLight->_fAttenuate;
vecCopy(pCurrCpuLight->_ambientColor, pLight->_ambientColor);
vecCopy(pCurrCpuLight->_diffuseColor, pLight->_diffuseColor);
vecCopy(pCurrCpuLight->_specularColor, pLight->_specularColor);
switch(eType)
{
case OMNI_P:
{
OmniPointLight *pOPl = dynamic_cast<OmniPointLight*>(pLight);
vecCopy(pCurrCpuLight->_omni_pos, pOPl->_pos);
}
break;
case DIR_P:
{
DirPointLight *pDPl = dynamic_cast<DirPointLight*>(pLight);
vecCopy(pCurrCpuLight->_dirp_pos, pDPl->_pos);
vecCopy(pCurrCpuLight->_dirp_dir, pDPl->_dir);
}
break;
case DIR:
{
DirLight *pDl = dynamic_cast<DirLight*>(pLight);
vecCopy(pCurrCpuLight->_dir_dir, pDl->_dir);
}
break;
}
}
// copy to gpu
//
nCurrLightCount = nLightCount;
cudaMemcpy(gpuLights, cpuLights, sizeof(LightGpu) * nLightCount, cudaMemcpyHostToDevice);
// Alloc Diffuse Lights space
//
err = cudaMalloc(&gpuDiffLightsPerLine, sizeof(LightCpu) * WinWidth * MAX_RAY_COUNT_PER_TREE);
if(err != cudaSuccess)
{
printf("CUDA error %s, %s, %s \n", __FILE__, __LINE__, cudaGetErrorString(err));
}
cpuDiffLightsPerLine = (LightCpu *)malloc(sizeof(LightGpu) * WinWidth * MAX_RAY_COUNT_PER_TREE);
if(!cpuDiffLightsPerLine)
{
printf("malloc failed %s, %s \n", __FILE__, __LINE__);
}
// Random number
err = cudaMalloc(&gpuRdm, sizeof(float) * WinWidth * MAX_RAY_COUNT_PER_TREE);
if(err != cudaSuccess)
{
printf("CUDA error %s, %s, %s \n", __FILE__, __LINE__, cudaGetErrorString(err));
}
cpuRdm = (float *)malloc(sizeof(float) * WinWidth * MAX_RAY_COUNT_PER_TREE);
if(!cpuRdm)
{
printf("malloc failed %s, %s \n", __FILE__, __LINE__);
}
for(int i = 0; i < WinWidth ; i ++)
for(int j = 0; j < MAX_RAY_COUNT_PER_TREE; j ++)
{
*(cpuRdm + j + i * MAX_RAY_COUNT_PER_TREE) = (rand() % 1000000) / 1000000.f;
}
cudaMemcpy(gpuRdm, cpuRdm, sizeof(float) * WinWidth * MAX_RAY_COUNT_PER_TREE, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
}
void releaseSceneGeomotry()
{
nCurrObjCount = 0;
if(gpuObjs)
{
cudaFree(gpuObjs);
gpuObjs = NULL;
}
if(hostObjs)
{
free(hostObjs);
hostObjs = NULL;
}
if(deviceKdRecBuf)
{
cudaFree(deviceKdRecBuf);
deviceKdRecBuf = NULL;
}
if(hostKdRoot)
{
free(hostKdRoot);
hostKdRoot = NULL;
}
if(deviceKdRoot)
{
cudaFree(deviceKdRoot);
deviceKdRoot = NULL;
}
if(hostPrimObjIdList)
{
free(hostPrimObjIdList);
hostPrimObjIdList = NULL;
}
if(devicePrimObjIdList)
{
cudaFree(devicePrimObjIdList);
devicePrimObjIdList = NULL;
}
//
if(gpuDiffLightsPerLine)
{
cudaFree(gpuDiffLightsPerLine);
gpuDiffLightsPerLine = NULL;
}
if(cpuDiffLightsPerLine)
{
free(cpuDiffLightsPerLine);
cpuDiffLightsPerLine = NULL;
}
}
void releaseSceneLights()
{
nCurrLightCount = 0;
if(gpuLights)
{
cudaFree(gpuLights);
gpuLights = NULL;
}
if(cpuLights)
{
free(cpuLights);
cpuLights = NULL;
}
}
static void copyTriangle(PrimGpuObj_host *pCurrPrimGpuObj, ObjObject *pObjObj, Triangle *pCurrTri)
{
pCurrPrimGpuObj->eType = TRI_GPU;
pCurrPrimGpuObj->nId = pCurrTri->_id;
pCurrPrimGpuObj->_fReflectionRatio = pObjObj->getReflectionRatio();
pCurrPrimGpuObj->_fRefractionRatio = pObjObj->getRefractionRatio();
pCurrPrimGpuObj->_fRefractionK = pObjObj->getRefractionK();
pCurrPrimGpuObj->_fEmitRatio = pObjObj->getEmissionRatio();
pCurrPrimGpuObj->_mat.fShininess = pObjObj->_mat.fShininess;
vecCopy(pCurrPrimGpuObj->_mat.specColor, pObjObj->_mat.specColor);
vecCopy(pCurrPrimGpuObj->_mat.diffColor, pObjObj->_mat.diffColor);
vecCopy(pCurrPrimGpuObj->_mat.ambiColor, pObjObj->_mat.ambiColor);
for(int n = 0; n < 3; n ++)
{
vecCopy(pCurrPrimGpuObj->_vertices[n], pCurrTri->_vertices[n]);
vecCopy(pCurrPrimGpuObj->_vnormal[n], pCurrTri->_vnormal[n]);
}
vecCopy(pCurrPrimGpuObj->_normal, pCurrTri->_normal);
pCurrPrimGpuObj->_bSmooth = pObjObj->_bSmooth;
pCurrPrimGpuObj->_bHasVNorm = pObjObj->_bHasVNorm;
// NOTE: not supported yet
pCurrPrimGpuObj->pTex = NULL;
}
static void copySquare(PrimGpuObj_host *pCurrPrimGpuObj, Square *pSqu)
{
pCurrPrimGpuObj->eType = SQU_GPU;
pCurrPrimGpuObj->nId = pSqu->_id;
vecCopy(pCurrPrimGpuObj->_vNormal, pSqu->_vNormal);
vecCopy(pCurrPrimGpuObj->_vWidthVec, pSqu->_vWidthVec);
vecCopy(pCurrPrimGpuObj->_vCenter, pSqu->_vCenter);
pCurrPrimGpuObj->_nWidth = pSqu->_nWidth;
pCurrPrimGpuObj->_nHeight = pSqu->_nHeight;
vecCopy(pCurrPrimGpuObj->_v2HeightVec, pSqu->_v2HeightVec);
vecCopy(pCurrPrimGpuObj->_v2WidthVec, pSqu->_v2WidthVec);
pCurrPrimGpuObj->a = pSqu->a;
pCurrPrimGpuObj->b = pSqu->b;
pCurrPrimGpuObj->c = pSqu->c;
pCurrPrimGpuObj->d = pSqu->d;
// Tex
if(pSqu->_tex)
{
int nMipInx = 0;
int texId = TextureManager::find(pSqu->_tex, &nMipInx);
if(texId != -1)
{
pCurrPrimGpuObj->pTex = pTexGpu + texId * MAX_GPU_MIPMAP_COUNT + nMipInx;
pCurrPrimGpuObj->eTexType = pSqu->_eTexMapType;
}
}
else
{
pCurrPrimGpuObj->pTex = NULL;
}
}
static void copySphere(PrimGpuObj_host *pCurrPrimGpuObj, Sphere *pSph)
{
pCurrPrimGpuObj->nId = pSph->_id;
pCurrPrimGpuObj->eType = SPH_GPU;
pCurrPrimGpuObj->_fRad = pSph->_fRad;
vecCopy(pCurrPrimGpuObj->_ctr, pSph->_ctr);
// Tex
int nMipInx = 0;
int texId = TextureManager::find(pSph->_tex, &nMipInx);
if(texId != -1)
{
pCurrPrimGpuObj->pTex = pTexGpu + texId * MAX_GPU_MIPMAP_COUNT + nMipInx;
pCurrPrimGpuObj->eTexType = pSph->_eTexMapType;
}
else
{
pCurrPrimGpuObj->pTex = NULL;
}
} |
3104fbbdcf1f3cc352ffa21ad8cedd6bf80b0d2c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "heat2d.h"
void Manage_Memory(int phase, int tid, float **h_u, float **d_u, float **d_un){
if (phase==0) {
// Allocate whole domain in host (master thread)
*h_u = (float*)malloc(NY*NX*sizeof(float));
}
if (phase==1) {
// Allocate whole domain in device (GPU thread)
hipError_t Error = hipSetDevice(tid);
if (DEBUG) printf("CUDA error (hipSetDevice) = %s\n",hipGetErrorString(Error));
Error = hipMalloc((void**)d_u ,NY*NX*sizeof(float));
if (DEBUG) printf("CUDA error (hipMalloc) = %s\n",hipGetErrorString(Error));
Error = hipMalloc((void**)d_un,NY*NX*sizeof(float));
if (DEBUG) printf("CUDA error (hipMalloc) = %s\n",hipGetErrorString(Error));
}
if (phase==2) {
// Free the whole domain variables (master thread)
free(*h_u);
hipError_t Error;
Error = hipFree(*d_u);
if (DEBUG) printf("CUDA error (hipFree) = %s\n",hipGetErrorString(Error));
Error = hipFree(*d_un);
if (DEBUG) printf("CUDA error (hipFree) = %s\n",hipGetErrorString(Error));
}
}
void Manage_Comms(int phase, int tid, float **h_u, float **d_u) {
// Manage CPU-GPU communications
if (DEBUG) printf(":::::::: Performing Comms (phase %d) ::::::::\n",phase);
if (phase == 0) {
// move h_u (from HOST) to d_u (to GPU)
hipError_t Error = hipMemcpy(*d_u,*h_u,NY*NX*sizeof(float),hipMemcpyHostToDevice);
if (DEBUG) printf("CUDA error (memcpy h -> d ) = %s\n",hipGetErrorString(Error));
}
if (phase == 1) {
// move d_u (from GPU) to h_u (to HOST)
hipError_t Error = hipMemcpy(*h_u,*d_u,NY*NX*sizeof(float),hipMemcpyDeviceToHost);
if (DEBUG) printf("CUDA error (memcpy d -> h ) = %s\n",hipGetErrorString(Error));
}
}
void Save_Results(float *u){
// print result to txt file
FILE *pFile = fopen("result.txt", "w");
if (pFile != NULL) {
for (int j = 0; j < NY; j++) {
for (int i = 0; i < NX; i++) {
fprintf(pFile, "%d\t %d\t %g\n",j,i,u[i+NX*j]);
}
}
fclose(pFile);
} else {
printf("Unable to save to file\n");
}
}
/******************************/
/* TEMPERATURE INITIALIZATION */
/******************************/
__global__ void SetIC_onDevice(float *u0){
int i, j, o, IC;
// threads id
i = threadIdx.x + blockIdx.x*blockDim.x;
j = threadIdx.y + blockIdx.y*blockDim.y;
// select IC
IC=2;
switch (IC) {
case 1: {
// set all domain's cells equal to zero
o = i+NX*j; u0[o] = 0.0;
// set BCs in the domain
if (j==0) u0[o] = 0.0; // bottom
if (i==0) u0[o] = 0.0; // left
if (j==NY-1) u0[o] = 1.0; // top
if (i==NX-1) u0[o] = 1.0; // right
break;
}
case 2: {
float u_bl = 0.7f;
float u_br = 1.0f;
float u_tl = 0.7f;
float u_tr = 1.0f;
// set all domain's cells equal to zero
o = i+NX*j; u0[o] = 0.0;
// set BCs in the domain
if (j==0) u0[o] = u_bl + (u_br-u_bl)*i/(NX+1); // bottom
if (j==NY-1) u0[o] = u_tl + (u_tr-u_tl)*i/(NX+1); // top
if (i==0) u0[o] = u_bl + (u_tl-u_bl)*j/(NY+1); // left
if (i==NX-1) u0[o] = u_br + (u_tr-u_br)*j/(NY+1); // right
break;
}
case 3: {
// set all domain's cells equal to zero
o = i+NX*j; u0[o] = 0.0;
// set left wall to 1
if (i==NX-1) u0[o] = 1.0;
break;
}
// here to add another IC
}
}
void Call_GPU_Init(float **u0){
// Load the initial condition
dim3 threads(32,32);
dim3 blocks((NX+1)/32,(NY+1)/32);
hipLaunchKernelGGL(( SetIC_onDevice), dim3(blocks), dim3(threads), 0, 0, *u0);
}
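// Jacobi/FTCS update for the 2-D heat equation: each interior node is relaxed with the
// 5-point stencil un = u + KX*(e - 2u + w) + KY*(n - 2u + s). KX and KY come from
// heat2d.h; they are assumed to be the dimensionless diffusion numbers
// (roughly alpha*dt/dx^2 and alpha*dt/dy^2), which must stay small enough for stability.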
__global__ void Laplace2d(const float * __restrict__ u, float * __restrict__ un){
int o, n, s, e, w;
// Threads id
const int i = threadIdx.x + blockIdx.x*blockDim.x;
const int j = threadIdx.y + blockIdx.y*blockDim.y;
o = i + (NX*j); // node( j,i,k ) n
n = (i==NX-1) ? o:o+NX; // node(j+1,i,k) |
s = (i==0) ? o:o-NX; // node(j-1,i,k) w--o--e
e = (j==NY-1) ? o:o+1; // node(j,i+1,k) |
w = (j==0) ? o:o-1; // node(j,i-1,k) s
// only update "interior" nodes
if(i>0 && i<NX-1 && j>0 && j<NY-1) {
un[o] = u[o] + KX*(u[e]-2*u[o]+u[w]) + KY*(u[n]-2*u[o]+u[s]);
} else {
un[o] = u[o];
}
}
__global__ void Laplace2d_v2(const float * __restrict__ u, float * __restrict__ un){
// Global Threads id
int j = threadIdx.x + blockIdx.x*blockDim.x;
int i = threadIdx.y + blockIdx.y*blockDim.y;
// Local Threads id
int lj = threadIdx.x;
int li = threadIdx.y;
// e_XX --> variables refer to the expanded shared memory locations in order to accommodate halo elements
//Current Local ID with radius offset.
int e_li = li + RADIUS;
int e_lj = lj + RADIUS;
// Variable pointing at top and bottom neighbouring location
int e_li_prev = e_li - 1;
int e_li_next = e_li + 1;
// Variable pointing at left and right neighbouring location
int e_lj_prev = e_lj - 1;
int e_lj_next = e_lj + 1;
__shared__ float sData [NJ+2*RADIUS][NI+2*RADIUS];
unsigned int index = (i)* NY + (j) ;
// copy top and bottom halo
if (li<RADIUS) {
//Copy Top Halo Element
if (blockIdx.y > 0) // Boundary check
sData[li][e_lj] = u[index - RADIUS * NY];
//Copy Bottom Halo Element
if (blockIdx.y < (gridDim.y-1)) // Boundary check
sData[e_li+NJ][e_lj] = u[index + NJ * NY];
}
// copy left and right halo
if (lj<RADIUS) {
if( blockIdx.x > 0) // Boundary check
sData[e_li][lj] = u[index - RADIUS];
if(blockIdx.x < (gridDim.x-1)) // Boundary check
sData[e_li][e_lj+NI] = u[index + NI];
}
// copy current location
sData[e_li][e_lj] = u[index];
__syncthreads( );
// only update "interior" nodes
if(i>0 && i<NX-1 && j>0 && j<NY-1) {
un[index] = sData[e_li][e_lj]
+ KX*(sData[e_li_prev][e_lj]-2*sData[e_li][e_lj]+sData[e_li_next][e_lj])
+ KY*(sData[e_li][e_lj_prev]-2*sData[e_li][e_lj]+sData[e_li][e_lj_next]);
} else {
un[index] = sData[e_li][e_lj];
}
}
void Call_Laplace(float **d_u, float **d_un) {
// Produce one iteration of the laplace operator
dim3 threads(NI,NJ);
dim3 blocks((NX+NI-1)/NI,(NY+NJ-1)/NJ);
//Laplace2d<<<blocks,threads>>>(*d_u,*d_un);
hipLaunchKernelGGL(( Laplace2d_v2), dim3(blocks),dim3(threads), 0, 0, *d_u,*d_un);
if (DEBUG) printf("CUDA error (Jacobi_Method) %s\n",hipGetErrorString(hipPeekAtLastError()));
hipError_t Error = hipDeviceSynchronize();
if (DEBUG) printf("CUDA error (Jacobi_Method Synchronize) %s\n",hipGetErrorString(Error));
}
| 3104fbbdcf1f3cc352ffa21ad8cedd6bf80b0d2c.cu |
#include "heat2d.h"
void Manage_Memory(int phase, int tid, float **h_u, float **d_u, float **d_un){
if (phase==0) {
// Allocate whole domain in host (master thread)
*h_u = (float*)malloc(NY*NX*sizeof(float));
}
if (phase==1) {
// Allocate whole domain in device (GPU thread)
cudaError_t Error = cudaSetDevice(tid);
if (DEBUG) printf("CUDA error (cudaSetDevice) = %s\n",cudaGetErrorString(Error));
Error = cudaMalloc((void**)d_u ,NY*NX*sizeof(float));
if (DEBUG) printf("CUDA error (cudaMalloc) = %s\n",cudaGetErrorString(Error));
Error = cudaMalloc((void**)d_un,NY*NX*sizeof(float));
if (DEBUG) printf("CUDA error (cudaMalloc) = %s\n",cudaGetErrorString(Error));
}
if (phase==2) {
// Free the whole domain variables (master thread)
free(*h_u);
cudaError_t Error;
Error = cudaFree(*d_u);
if (DEBUG) printf("CUDA error (cudaFree) = %s\n",cudaGetErrorString(Error));
Error = cudaFree(*d_un);
if (DEBUG) printf("CUDA error (cudaFree) = %s\n",cudaGetErrorString(Error));
}
}
void Manage_Comms(int phase, int tid, float **h_u, float **d_u) {
// Manage CPU-GPU communications
if (DEBUG) printf(":::::::: Performing Comms (phase %d) ::::::::\n",phase);
if (phase == 0) {
// move h_u (from HOST) to d_u (to GPU)
cudaError_t Error = cudaMemcpy(*d_u,*h_u,NY*NX*sizeof(float),cudaMemcpyHostToDevice);
if (DEBUG) printf("CUDA error (memcpy h -> d ) = %s\n",cudaGetErrorString(Error));
}
if (phase == 1) {
// move d_u (from GPU) to h_u (to HOST)
cudaError_t Error = cudaMemcpy(*h_u,*d_u,NY*NX*sizeof(float),cudaMemcpyDeviceToHost);
if (DEBUG) printf("CUDA error (memcpy d -> h ) = %s\n",cudaGetErrorString(Error));
}
}
void Save_Results(float *u){
// print result to txt file
FILE *pFile = fopen("result.txt", "w");
if (pFile != NULL) {
for (int j = 0; j < NY; j++) {
for (int i = 0; i < NX; i++) {
fprintf(pFile, "%d\t %d\t %g\n",j,i,u[i+NX*j]);
}
}
fclose(pFile);
} else {
printf("Unable to save to file\n");
}
}
/******************************/
/* TEMPERATURE INITIALIZATION */
/******************************/
__global__ void SetIC_onDevice(float *u0){
int i, j, o, IC;
// threads id
i = threadIdx.x + blockIdx.x*blockDim.x;
j = threadIdx.y + blockIdx.y*blockDim.y;
// select IC
IC=2;
switch (IC) {
case 1: {
// set all domain's cells equal to zero
o = i+NX*j; u0[o] = 0.0;
// set BCs in the domain
if (j==0) u0[o] = 0.0; // bottom
if (i==0) u0[o] = 0.0; // left
if (j==NY-1) u0[o] = 1.0; // top
if (i==NX-1) u0[o] = 1.0; // right
break;
}
case 2: {
float u_bl = 0.7f;
float u_br = 1.0f;
float u_tl = 0.7f;
float u_tr = 1.0f;
// set all domain's cells equal to zero
o = i+NX*j; u0[o] = 0.0;
// set BCs in the domain
if (j==0) u0[o] = u_bl + (u_br-u_bl)*i/(NX+1); // bottom
if (j==NY-1) u0[o] = u_tl + (u_tr-u_tl)*i/(NX+1); // top
if (i==0) u0[o] = u_bl + (u_tl-u_bl)*j/(NY+1); // left
if (i==NX-1) u0[o] = u_br + (u_tr-u_br)*j/(NY+1); // right
break;
}
case 3: {
// set all domain's cells equal to zero
o = i+NX*j; u0[o] = 0.0;
// set left wall to 1
if (i==NX-1) u0[o] = 1.0;
break;
}
// here to add another IC
}
}
void Call_GPU_Init(float **u0){
// Load the initial condition
dim3 threads(32,32);
dim3 blocks((NX+1)/32,(NY+1)/32);
SetIC_onDevice<<<blocks, threads>>>(*u0);
}
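// Jacobi/FTCS update for the 2-D heat equation: each interior node is relaxed with the
// 5-point stencil un = u + KX*(e - 2u + w) + KY*(n - 2u + s). KX and KY come from
// heat2d.h; they are assumed to be the dimensionless diffusion numbers
// (roughly alpha*dt/dx^2 and alpha*dt/dy^2), which must stay small enough for stability.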
__global__ void Laplace2d(const float * __restrict__ u, float * __restrict__ un){
int o, n, s, e, w;
// Threads id
const int i = threadIdx.x + blockIdx.x*blockDim.x;
const int j = threadIdx.y + blockIdx.y*blockDim.y;
o = i + (NX*j); // node( j,i,k ) n
n = (i==NX-1) ? o:o+NX; // node(j+1,i,k) |
s = (i==0) ? o:o-NX; // node(j-1,i,k) w--o--e
e = (j==NY-1) ? o:o+1; // node(j,i+1,k) |
w = (j==0) ? o:o-1; // node(j,i-1,k) s
// only update "interior" nodes
if(i>0 && i<NX-1 && j>0 && j<NY-1) {
un[o] = u[o] + KX*(u[e]-2*u[o]+u[w]) + KY*(u[n]-2*u[o]+u[s]);
} else {
un[o] = u[o];
}
}
__global__ void Laplace2d_v2(const float * __restrict__ u, float * __restrict__ un){
// Global Threads id
int j = threadIdx.x + blockIdx.x*blockDim.x;
int i = threadIdx.y + blockIdx.y*blockDim.y;
// Local Threads id
int lj = threadIdx.x;
int li = threadIdx.y;
// e_XX --> variables refer to the expanded shared memory locations in order to accommodate halo elements
//Current Local ID with radius offset.
int e_li = li + RADIUS;
int e_lj = lj + RADIUS;
// Variable pointing at top and bottom neighbouring location
int e_li_prev = e_li - 1;
int e_li_next = e_li + 1;
// Variable pointing at left and right neighbouring location
int e_lj_prev = e_lj - 1;
int e_lj_next = e_lj + 1;
__shared__ float sData [NJ+2*RADIUS][NI+2*RADIUS];
unsigned int index = (i)* NY + (j) ;
// copy top and bottom halo
if (li<RADIUS) {
//Copy Top Halo Element
if (blockIdx.y > 0) // Boundary check
sData[li][e_lj] = u[index - RADIUS * NY];
//Copy Bottom Halo Element
if (blockIdx.y < (gridDim.y-1)) // Boundary check
sData[e_li+NJ][e_lj] = u[index + NJ * NY];
}
// copy left and right halo
if (lj<RADIUS) {
if( blockIdx.x > 0) // Boundary check
sData[e_li][lj] = u[index - RADIUS];
if(blockIdx.x < (gridDim.x-1)) // Boundary check
sData[e_li][e_lj+NI] = u[index + NI];
}
// copy current location
sData[e_li][e_lj] = u[index];
__syncthreads( );
// only update "interior" nodes
if(i>0 && i<NX-1 && j>0 && j<NY-1) {
un[index] = sData[e_li][e_lj]
+ KX*(sData[e_li_prev][e_lj]-2*sData[e_li][e_lj]+sData[e_li_next][e_lj])
+ KY*(sData[e_li][e_lj_prev]-2*sData[e_li][e_lj]+sData[e_li][e_lj_next]);
} else {
un[index] = sData[e_li][e_lj];
}
}
void Call_Laplace(float **d_u, float **d_un) {
// Produce one iteration of the laplace operator
dim3 threads(NI,NJ);
dim3 blocks((NX+NI-1)/NI,(NY+NJ-1)/NJ);
//Laplace2d<<<blocks,threads>>>(*d_u,*d_un);
Laplace2d_v2<<<blocks,threads>>>(*d_u,*d_un);
if (DEBUG) printf("CUDA error (Jacobi_Method) %s\n",cudaGetErrorString(cudaPeekAtLastError()));
cudaError_t Error = cudaDeviceSynchronize();
if (DEBUG) printf("CUDA error (Jacobi_Method Synchronize) %s\n",cudaGetErrorString(Error));
}
|
f1400b7bed56fe3dac8838b3908ac1bfb143f17e.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* NOTE: this file is generated by dispatch_00_generate.py
*
* Make changes there and run in this directory:
*
* > python dispatch_00_generate.py
*
*/
#include <raft/core/operators.hpp> // raft::identity_op
#include <raft/distance/detail/distance_ops/all_ops.cuh> // ops::*
#include <raft/distance/detail/pairwise_matrix/dispatch-inl.cuh> // dispatch
#include <raft/distance/detail/pairwise_matrix/dispatch_sm60.cuh>
#define instantiate_raft_distance_detail_pairwise_matrix_dispatch( \
OpT, DataT, AccT, OutT, FinOpT, IdxT) \
template void raft::distance::detail:: \
pairwise_matrix_dispatch<OpT<DataT, AccT, IdxT>, DataT, AccT, OutT, FinOpT, IdxT>( \
OpT<DataT, AccT, IdxT> distance_op, \
IdxT m, \
IdxT n, \
IdxT k, \
const DataT* x, \
const DataT* y, \
const DataT* x_norm, \
const DataT* y_norm, \
OutT* out, \
FinOpT fin_op, \
hipStream_t stream, \
bool is_row_major)
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
raft::distance::detail::ops::jensen_shannon_distance_op,
double,
double,
double,
raft::identity_op,
int);
#undef instantiate_raft_distance_detail_pairwise_matrix_dispatch
| f1400b7bed56fe3dac8838b3908ac1bfb143f17e.cu | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* NOTE: this file is generated by dispatch_00_generate.py
*
* Make changes there and run in this directory:
*
* > python dispatch_00_generate.py
*
*/
#include <raft/core/operators.hpp> // raft::identity_op
#include <raft/distance/detail/distance_ops/all_ops.cuh> // ops::*
#include <raft/distance/detail/pairwise_matrix/dispatch-inl.cuh> // dispatch
#include <raft/distance/detail/pairwise_matrix/dispatch_sm60.cuh>
#define instantiate_raft_distance_detail_pairwise_matrix_dispatch( \
OpT, DataT, AccT, OutT, FinOpT, IdxT) \
template void raft::distance::detail:: \
pairwise_matrix_dispatch<OpT<DataT, AccT, IdxT>, DataT, AccT, OutT, FinOpT, IdxT>( \
OpT<DataT, AccT, IdxT> distance_op, \
IdxT m, \
IdxT n, \
IdxT k, \
const DataT* x, \
const DataT* y, \
const DataT* x_norm, \
const DataT* y_norm, \
OutT* out, \
FinOpT fin_op, \
cudaStream_t stream, \
bool is_row_major)
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
raft::distance::detail::ops::jensen_shannon_distance_op,
double,
double,
double,
raft::identity_op,
int);
#undef instantiate_raft_distance_detail_pairwise_matrix_dispatch
|
bbe56db0021a207d5b4a94321d0657ce15c3a505.hip | // !!! This is a file automatically generated by hipify!!!
#pragma once
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdbool.h>
#pragma comment(lib,"Ws2_32.lib")
#include <string.h>
#include <windows.h> // sleep
#include <winsock2.h>
#include "string_util.cu"
#include "sha1.c"
#define CUDA_THREADS 512
static char* username = NULL;
unsigned char threads_count = 0;
bool request_job(SOCKET sock) {
//const char*
char* req = NULL;
req = make_req(username, NULL);
// printf("%s\n", req);
if (send(sock, req, fast_strlen(req), 0) < 0) {
printf("request_job() : failed\n");
return 0;
}
else {
free(req);
return 1;
}
}
bool parse_args(int argc, char** argv) {
for (unsigned char i = 1; i < (unsigned char)argc; i += 2) {
if (strcmp(argv[i], "--threads") == 0) {
threads_count = (unsigned char)atoi(argv[i + 1]);
printf("Threads: %i\n", threads_count);
}
else if (strcmp(argv[i], "--user") == 0) {
username = argv[i + 1];
printf("User: %s\n", username);
}
}
if (!username) {
printf("parse_args(): use --user to set a username!\n");
return 0;
}
printf("parse_args(): you have %i threads\n", threads_count);
return 1;
}
SOCKET connect_to_server(char* ip, unsigned short port) {
printf("connect_to_server(): connecting to %s:%hu...\n", ip, port);
WSADATA wsa;
SOCKET s;
struct sockaddr_in server;
char server_reply[4];
printf("connect_to_server(): initializing socket...\n");
if (WSAStartup(MAKEWORD(2, 2), &wsa) != 0) {
printf("connect_to_server(): failed. Error Code : %d", WSAGetLastError());
return 0;
}
if ((s = socket(AF_INET, SOCK_STREAM, 0)) == INVALID_SOCKET) {
printf("connect_to_server(): Could not create socket : %d", WSAGetLastError());
return INVALID_SOCKET;
}
server.sin_addr.s_addr = inet_addr(ip);
server.sin_family = AF_INET;
server.sin_port = htons(port);
//Connect to remote server
if (connect(s, (struct sockaddr*)&server, sizeof(server)) < 0) {
printf("connect_to_server(): connect error\n");
return 0;
}
printf("connect_to_server(): connected successfully!\n");
//Receive a reply from the server
if ((recv(s, server_reply, 3, 0)) == SOCKET_ERROR) {
printf("connect_to_server(): recv version failed\n");
return 0;
}
server_reply[3] = '\0';
printf("connect_to_server(): server version: "); printf("%s", server_reply); printf("\n");
return s;
}
// Iterative function to implement itoa() function in C
__device__ char* cuda_itoa(char str[], int num)
{
int i, rem, len = 0, n;
n = num;
while (n != 0)
{
len++;
n /= 10;
}
for (i = 0; i < len; i++)
{
rem = num % 10;
num = num / 10;
str[len - (i + 1)] = rem + '0';
}
str[len] = '\0';
return str;
}
__device__ __forceinline__ int cuda_bytecmp(register const byte* s1, register const byte* s2) {
register unsigned char n = 11;
do {
if (*s1 != *s2++)
return 0;
if (*s1++ == 0)
break;
} while (--n != 0);
return 1;
}
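// Brute-force search kernel: every thread appends its global index (written as decimal
// text by cuda_itoa) to the 40-character job prefix, hashes the result with SHA-1, and
// compares the digest against the target via cuda_bytecmp. The first matching thread
// publishes its index through *result; threads exit early once *result is non-zero or
// their index exceeds diff * 100.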
__global__ void sha1Kernel(unsigned int* result, char* prefix, byte* target, unsigned int* diff) {
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
if (*result != 0 || index > *diff * 100) {
return;
}
char buffer[32];
byte final_hash[20];
SHA1_CTX sha1;
sha1_init(&sha1);
sha1_update(&sha1, (const byte*)prefix, 40);
cuda_itoa(buffer, index);
sha1_update(&sha1, (const byte*)buffer, cuda_fast_strlen(buffer));
sha1_final(&sha1, final_hash);
if (cuda_bytecmp(final_hash, target) == true) {
*result = index;
}
}
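// Receive one job from the pool, run the brute-force kernel on it, and return the
// winning nonce (0 if nothing was found). The job line is expected to look like
// "<40-char prefix>,<40-hex-char target>,<difficulty>" and is split with read_to().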
unsigned int process_job(SOCKET sock, unsigned int* dev_result, char* dev_prefix, byte* dev_target, unsigned int* dev_diff) {
char buffer[100];
int size = recv(sock, buffer, sizeof(buffer) - 1, 0);
if (size <= 0) {
printf("process_job(): recv failed or returned zero bytes!\n");
return 0;
}
buffer[size] = '\0';
if (buffer[0] == 'B' && buffer[1] == 'A' && buffer[2] == 'D' && buffer[3] == '\0')
return 0;
unsigned short id = 0;
char* prefix = read_to(buffer, ',', &id);
char* job = read_to(buffer + id, ',', &id);
byte target[20];
hexToBytes(target, job);
int diff = atoi(buffer + id);
unsigned int result = 0;
hipError_t cudaerror;
// printf("%s\n%s\n%i\n", prefix, job, diff);
hipMemcpy(dev_prefix, prefix, 41, hipMemcpyHostToDevice);
cudaerror = hipGetLastError();
if (cudaerror != hipSuccess) {
printf("dev_prefix malloc error: %s\n", hipGetErrorString(cudaerror));
}
hipMemcpy(dev_target, &target, 20, hipMemcpyHostToDevice);
cudaerror = hipGetLastError();
if (cudaerror != hipSuccess) {
printf("dev_target malloc error: %s\n", hipGetErrorString(cudaerror));
}
hipMemcpy(dev_diff, &diff, sizeof(unsigned int), hipMemcpyHostToDevice);
cudaerror = hipGetLastError();
if (cudaerror != hipSuccess) {
printf("dev_diff malloc error: %s\n", hipGetErrorString(cudaerror));
}
hipMemcpy(dev_result, &result, sizeof(unsigned int), hipMemcpyHostToDevice);
cudaerror = hipGetLastError();
if (cudaerror != hipSuccess) {
printf("dev_diff malloc error: %s\n", hipGetErrorString(cudaerror));
}
hipLaunchKernelGGL(( sha1Kernel) , dim3((unsigned long)((100 * diff) / CUDA_THREADS) + 1), dim3(CUDA_THREADS), 0, 0, dev_result, dev_prefix, dev_target, dev_diff);
hipDeviceSynchronize();
cudaerror = hipGetLastError();
if (cudaerror != hipSuccess) {
printf("sha1Kernel execution error: %s\nIt's possible that you got blocked by the server.\n", hipGetErrorString(cudaerror));
closesocket(sock);
exit(-1);
}
hipMemcpy(&result, dev_result, sizeof(unsigned int), hipMemcpyDeviceToHost);
free(prefix);
free(job);
return result;
}
void send_job(SOCKET sock, unsigned int job) {
char job_result[64];
itoa(job, job_result, 10);
strcat(job_result, ",,CoinMiner");
if (send(sock, job_result, fast_strlen(job_result), 0) < 0) {
printf("send_job() : failed\n");
}
char server_reply[7];
int rlen = recv(sock, server_reply, 6, 0);
if (rlen == SOCKET_ERROR) {
printf("send_job(): recv version failed\n");
rlen = 0;
}
server_reply[rlen] = '\0'; // make sure the reply is NUL-terminated before printing
printf("%s - %s\n", job_result, server_reply);
server_reply[0] = '\0';
server_reply[1] = '\0';
server_reply[2] = '\0';
server_reply[3] = '\0';
server_reply[4] = '\0';
server_reply[5] = '\0';
} | bbe56db0021a207d5b4a94321d0657ce15c3a505.cu | #pragma once
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdbool.h>
#pragma comment(lib,"Ws2_32.lib")
#include <string.h>
#include <windows.h> // sleep
#include <winsock2.h>
#include "string_util.cu"
#include "sha1.c"
#define CUDA_THREADS 512
static char* username = NULL;
unsigned char threads_count = 0;
bool request_job(SOCKET sock) {
//const char*
char* req = NULL;
req = make_req(username, NULL);
// printf("%s\n", req);
if (send(sock, req, fast_strlen(req), 0) < 0) {
printf("request_job() : failed\n");
return 0;
}
else {
free(req);
return 1;
}
}
bool parse_args(int argc, char** argv) {
for (unsigned char i = 1; i < (unsigned char)argc; i += 2) {
if (strcmp(argv[i], "--threads") == 0) {
threads_count = (unsigned char)atoi(argv[i + 1]);
printf("Threads: %i\n", threads_count);
}
else if (strcmp(argv[i], "--user") == 0) {
username = argv[i + 1];
printf("User: %s\n", username);
}
}
if (!username) {
printf("parse_args(): use --user to set a username!\n");
return 0;
}
printf("parse_args(): you have %i threads\n", threads_count);
return 1;
}
SOCKET connect_to_server(char* ip, unsigned short port) {
printf("connect_to_server(): connecting to %s:%hu...\n", ip, port);
WSADATA wsa;
SOCKET s;
struct sockaddr_in server;
char server_reply[4];
printf("connect_to_server(): initializing socket...\n");
if (WSAStartup(MAKEWORD(2, 2), &wsa) != 0) {
printf("connect_to_server(): failed. Error Code : %d", WSAGetLastError());
return 0;
}
if ((s = socket(AF_INET, SOCK_STREAM, 0)) == INVALID_SOCKET) {
printf("connect_to_server(): Could not create socket : %d", WSAGetLastError());
return INVALID_SOCKET;
}
server.sin_addr.s_addr = inet_addr(ip);
server.sin_family = AF_INET;
server.sin_port = htons(port);
//Connect to remote server
if (connect(s, (struct sockaddr*)&server, sizeof(server)) < 0) {
printf("connect_to_server(): connect error\n");
return 0;
}
printf("connect_to_server(): connected successfully!\n");
//Receive a reply from the server
if ((recv(s, server_reply, 3, 0)) == SOCKET_ERROR) {
printf("connect_to_server(): recv version failed\n");
return 0;
}
server_reply[3] = '\0';
printf("connect_to_server(): server version: "); printf("%s", server_reply); printf("\n");
return s;
}
// Iterative function to implement itoa() function in C
__device__ char* cuda_itoa(char str[], int num)
{
int i, rem, len = 0, n;
n = num;
while (n != 0)
{
len++;
n /= 10;
}
for (i = 0; i < len; i++)
{
rem = num % 10;
num = num / 10;
str[len - (i + 1)] = rem + '0';
}
str[len] = '\0';
return str;
}
__device__ __forceinline__ int cuda_bytecmp(register const byte* s1, register const byte* s2) {
register unsigned char n = 11;
do {
if (*s1 != *s2++)
return 0;
if (*s1++ == 0)
break;
} while (--n != 0);
return 1;
}
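// Brute-force search kernel: every thread appends its global index (written as decimal
// text by cuda_itoa) to the 40-character job prefix, hashes the result with SHA-1, and
// compares the digest against the target via cuda_bytecmp. The first matching thread
// publishes its index through *result; threads exit early once *result is non-zero or
// their index exceeds diff * 100.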
__global__ void sha1Kernel(unsigned int* result, char* prefix, byte* target, unsigned int* diff) {
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
if (*result != 0 || index > *diff * 100) {
return;
}
char buffer[32];
byte final_hash[20];
SHA1_CTX sha1;
sha1_init(&sha1);
sha1_update(&sha1, (const byte*)prefix, 40);
cuda_itoa(buffer, index);
sha1_update(&sha1, (const byte*)buffer, cuda_fast_strlen(buffer));
sha1_final(&sha1, final_hash);
if (cuda_bytecmp(final_hash, target) == true) {
*result = index;
}
}
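// Receive one job from the pool, run the brute-force kernel on it, and return the
// winning nonce (0 if nothing was found). The job line is expected to look like
// "<40-char prefix>,<40-hex-char target>,<difficulty>" and is split with read_to().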
unsigned int process_job(SOCKET sock, unsigned int* dev_result, char* dev_prefix, byte* dev_target, unsigned int* dev_diff) {
char buffer[100];
int size = recv(sock, buffer, sizeof(buffer) - 1, 0);
if (size <= 0) {
printf("process_job(): recv failed or returned zero bytes!\n");
return 0;
}
buffer[size] = '\0';
if (buffer[0] == 'B' && buffer[1] == 'A' && buffer[2] == 'D' && buffer[3] == '\0')
return 0;
unsigned short id = 0;
char* prefix = read_to(buffer, ',', &id);
char* job = read_to(buffer + id, ',', &id);
byte target[20];
hexToBytes(target, job);
int diff = atoi(buffer + id);
unsigned int result = 0;
cudaError_t cudaerror;
// printf("%s\n%s\n%i\n", prefix, job, diff);
cudaMemcpy(dev_prefix, prefix, 41, cudaMemcpyHostToDevice);
cudaerror = cudaGetLastError();
if (cudaerror != cudaSuccess) {
printf("dev_prefix malloc error: %s\n", cudaGetErrorString(cudaerror));
}
cudaMemcpy(dev_target, &target, 20, cudaMemcpyHostToDevice);
cudaerror = cudaGetLastError();
if (cudaerror != cudaSuccess) {
printf("dev_target malloc error: %s\n", cudaGetErrorString(cudaerror));
}
cudaMemcpy(dev_diff, &diff, sizeof(unsigned int), cudaMemcpyHostToDevice);
cudaerror = cudaGetLastError();
if (cudaerror != cudaSuccess) {
printf("dev_diff malloc error: %s\n", cudaGetErrorString(cudaerror));
}
cudaMemcpy(dev_result, &result, sizeof(unsigned int), cudaMemcpyHostToDevice);
cudaerror = cudaGetLastError();
if (cudaerror != cudaSuccess) {
printf("dev_diff malloc error: %s\n", cudaGetErrorString(cudaerror));
}
sha1Kernel <<<(unsigned long)((100 * diff) / CUDA_THREADS) + 1, CUDA_THREADS>>> (dev_result, dev_prefix, dev_target, dev_diff);
cudaDeviceSynchronize();
cudaerror = cudaGetLastError();
if (cudaerror != cudaSuccess) {
printf("sha1Kernel execution error: %s\nIt's possible that you got blocked by the server.\n", cudaGetErrorString(cudaerror));
closesocket(sock);
exit(-1);
}
cudaMemcpy(&result, dev_result, sizeof(unsigned int), cudaMemcpyDeviceToHost);
free(prefix);
free(job);
return result;
}
void send_job(SOCKET sock, unsigned int job) {
char job_result[64];
itoa(job, job_result, 10);
strcat(job_result, ",,CoinMiner");
if (send(sock, job_result, fast_strlen(job_result), 0) < 0) {
printf("send_job() : failed\n");
}
char server_reply[7];
int rlen = recv(sock, server_reply, 6, 0);
if (rlen == SOCKET_ERROR) {
printf("send_job(): recv version failed\n");
rlen = 0;
}
server_reply[rlen] = '\0'; // make sure the reply is NUL-terminated before printing
printf("%s - %s\n", job_result, server_reply);
server_reply[0] = '\0';
server_reply[1] = '\0';
server_reply[2] = '\0';
server_reply[3] = '\0';
server_reply[4] = '\0';
server_reply[5] = '\0';
} |
c59b0f321296bb1f95aa1b0435ce6fb28f8deb70.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<cuda.h>
#include<cuda_runtime.h>
extern "C"
{
void runCudaLand( int myrank );
}
__global__ void Hello_kernel( int myrank );
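// Bind this MPI rank to a CUDA device round-robin (rank modulo the visible device
// count), double-check the assignment, and then launch a one-thread kernel that
// prints a greeting from the device.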
void runCudaLand( int myrank )
{
int cudaDeviceCount = -1;
int assignedCudaDevice = -1;
hipError_t cE = hipSuccess;
if( (cE = hipGetDeviceCount( &cudaDeviceCount)) != hipSuccess )
{
printf(" Unable to determine cuda device count, error is %d, count is %d\n",
cE, cudaDeviceCount );
exit(-1);
}
if( (cE = hipSetDevice( myrank % cudaDeviceCount )) != hipSuccess )
{
printf(" Unable to have rank %d set to cuda device %d, error is %d \n",
myrank, (myrank % cudaDeviceCount), cE);
exit(-1);
}
if( (cE = hipGetDevice( &assignedCudaDevice )) != hipSuccess )
{
printf(" Unable to have rank %d set to cuda device %d, error is %d \n",
myrank, (myrank % cudaDeviceCount), cE);
exit(-1);
}
if( assignedCudaDevice != (myrank % cudaDeviceCount) )
{
printf("MPI Rank %d: assignedCudaDevice %d NOT EQ to (myrank(%d) mod cudaDeviceCount(%d)) \n",
myrank, assignedCudaDevice, myrank, cudaDeviceCount );
exit(-1);
}
printf("MPI Rank %d: leaving CPU land and going to CUDA Device %d \n", myrank, (myrank % cudaDeviceCount));
hipLaunchKernelGGL(( Hello_kernel), dim3(1),dim3(1), 0, 0, myrank );
hipDeviceSynchronize();
printf("MPI Rank %d: re-entering CPU land \n", myrank );
}
__global__ void Hello_kernel( int myrank )
{
hipError_t cE = hipSuccess;
int device=-12;
int cudaDeviceCount = -1;
/* if( (cE = hipGetDeviceCount( &cudaDeviceCount)) != hipSuccess ) */
/* { */
/* printf(" Rank %d in CUDA: Unable to determine cuda device count, error is %d, count is %d\n", */
/* myrank, cE, cudaDeviceCount ); */
/* } */
if( (cE = hipGetDevice( &device)) != hipSuccess )
{
printf(" Rank %d in CUDA: Unable to determine cuda device number, error is %d, device is %d\n",
myrank, cE, device );
}
printf("Hello World from CUDA/MPI: Rank %d, Device %d, Thread %d, Block %d \n",
myrank, device, threadIdx.x, blockIdx.x );
__syncthreads();
}
| c59b0f321296bb1f95aa1b0435ce6fb28f8deb70.cu | #include<stdio.h>
#include<cuda.h>
#include<cuda_runtime.h>
extern "C"
{
void runCudaLand( int myrank );
}
__global__ void Hello_kernel( int myrank );
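// Bind this MPI rank to a CUDA device round-robin (rank modulo the visible device
// count), double-check the assignment, and then launch a one-thread kernel that
// prints a greeting from the device.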
void runCudaLand( int myrank )
{
int cudaDeviceCount = -1;
int assignedCudaDevice = -1;
cudaError_t cE = cudaSuccess;
if( (cE = cudaGetDeviceCount( &cudaDeviceCount)) != cudaSuccess )
{
printf(" Unable to determine cuda device count, error is %d, count is %d\n",
cE, cudaDeviceCount );
exit(-1);
}
if( (cE = cudaSetDevice( myrank % cudaDeviceCount )) != cudaSuccess )
{
printf(" Unable to have rank %d set to cuda device %d, error is %d \n",
myrank, (myrank % cudaDeviceCount), cE);
exit(-1);
}
if( (cE = cudaGetDevice( &assignedCudaDevice )) != cudaSuccess )
{
printf(" Unable to have rank %d set to cuda device %d, error is %d \n",
myrank, (myrank % cudaDeviceCount), cE);
exit(-1);
}
if( assignedCudaDevice != (myrank % cudaDeviceCount) )
{
printf("MPI Rank %d: assignedCudaDevice %d NOT EQ to (myrank(%d) mod cudaDeviceCount(%d)) \n",
myrank, assignedCudaDevice, myrank, cudaDeviceCount );
exit(-1);
}
printf("MPI Rank %d: leaving CPU land and going to CUDA Device %d \n", myrank, (myrank % cudaDeviceCount));
Hello_kernel<<<1,1>>>( myrank );
cudaDeviceSynchronize();
printf("MPI Rank %d: re-entering CPU land \n", myrank );
}
__global__ void Hello_kernel( int myrank )
{
cudaError_t cE = cudaSuccess;
int device=-12;
int cudaDeviceCount = -1;
/* if( (cE = cudaGetDeviceCount( &cudaDeviceCount)) != cudaSuccess ) */
/* { */
/* printf(" Rank %d in CUDA: Unable to determine cuda device count, error is %d, count is %d\n", */
/* myrank, cE, cudaDeviceCount ); */
/* } */
if( (cE = cudaGetDevice( &device)) != cudaSuccess )
{
printf(" Rank %d in CUDA: Unable to determine cuda device number, error is %d, device is %d\n",
myrank, cE, device );
}
printf("Hello World from CUDA/MPI: Rank %d, Device %d, Thread %d, Block %d \n",
myrank, device, threadIdx.x, blockIdx.x );
__syncthreads();
}
|
46440a059e85ed67efe2bccd48412c631ba3adc9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
#include <math.h>
#include <hip/hip_complex.h>
__global__ void exp_float(int n,int idx,float *dy,int incy,float *result) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= idx && i % incy == 0)
result[i] = expf(dy[i]);
}
} | 46440a059e85ed67efe2bccd48412c631ba3adc9.cu | extern "C"
#include <math.h>
#include <cuComplex.h>
__global__ void exp_float(int n,int idx,float *dy,int incy,float *result) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= idx && i % incy == 0)
result[i] = expf(dy[i]);
}
} |
42d41454d8fd0d542ebd74264b8e8fa81b819efd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Convert a real4 to a real3 by removing its last element.
*/
inline __device__ real3 trim(real4 v) {
return make_real3(v.x, v.y, v.z);
}
/**
* This does nothing, and just exists to simplify the code generation.
*/
inline __device__ real3 trim(real3 v) {
return v;
}
/**
* Compute the difference between two vectors, setting the fourth component to the squared magnitude.
*/
inline __device__ real4 delta(real4 vec1, real4 vec2) {
real4 result = make_real4(vec1.x-vec2.x, vec1.y-vec2.y, vec1.z-vec2.z, 0.0f);
result.w = result.x*result.x + result.y*result.y + result.z*result.z;
return result;
}
/**
* Compute the difference between two vectors, taking periodic boundary conditions into account
* and setting the fourth component to the squared magnitude.
*/
inline __device__ real4 deltaPeriodic(real4 vec1, real4 vec2, real4 periodicBoxSize, real4 invPeriodicBoxSize) {
real4 result = make_real4(vec1.x-vec2.x, vec1.y-vec2.y, vec1.z-vec2.z, 0.0f);
#ifdef USE_PERIODIC
result.x -= floor(result.x*invPeriodicBoxSize.x+0.5f)*periodicBoxSize.x;
result.y -= floor(result.y*invPeriodicBoxSize.y+0.5f)*periodicBoxSize.y;
result.z -= floor(result.z*invPeriodicBoxSize.z+0.5f)*periodicBoxSize.z;
#endif
result.w = result.x*result.x + result.y*result.y + result.z*result.z;
return result;
}
/**
* Compute the angle between two vectors. The w component of each vector should contain the squared magnitude.
*/
inline __device__ real computeAngle(real4 vec1, real4 vec2) {
real dotProduct = vec1.x*vec2.x + vec1.y*vec2.y + vec1.z*vec2.z;
real cosine = dotProduct*RSQRT(vec1.w*vec2.w);
real angle;
if (cosine > 0.99f || cosine < -0.99f) {
// We're close to the singularity in acos(), so take the cross product and use asin() instead.
real3 crossProduct = cross(vec1, vec2);
real scale = vec1.w*vec2.w;
angle = ASIN(SQRT(dot(crossProduct, crossProduct)/scale));
if (cosine < 0.0f)
angle = M_PI-angle;
}
else
angle = ACOS(cosine);
return angle;
}
/**
* Compute the cross product of two vectors, setting the fourth component to the squared magnitude.
*/
inline __device__ real4 computeCross(real4 vec1, real4 vec2) {
real3 result = cross(vec1, vec2);
return make_real4(result.x, result.y, result.z, result.x*result.x + result.y*result.y + result.z*result.z);
}
/**
* Compute forces on donors.
*/
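// Tiling strategy: each thread owns one donor and keeps its three atom positions in
// registers, while acceptors are staged through shared memory in blocks of blockDim.x
// (posBuffer holds three real4 positions per acceptor). Forces are accumulated into the
// global buffer as 64-bit fixed point (scaled by 2^32) via atomicAdd.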
extern "C" __global__ void computeDonorForces(unsigned long long* __restrict__ force, real* __restrict__ energyBuffer, const real4* __restrict__ posq,
const int4* __restrict__ exclusions, const int4* __restrict__ donorAtoms, const int4* __restrict__ acceptorAtoms, real4 periodicBoxSize, real4 invPeriodicBoxSize
PARAMETER_ARGUMENTS) {
extern __shared__ real4 posBuffer[];
real energy = 0;
real3 f1 = make_real3(0);
real3 f2 = make_real3(0);
real3 f3 = make_real3(0);
for (int donorStart = 0; donorStart < NUM_DONORS; donorStart += blockDim.x*gridDim.x) {
// Load information about the donor this thread will compute forces on.
int donorIndex = donorStart+blockIdx.x*blockDim.x+threadIdx.x;
int4 atoms, exclusionIndices;
real4 d1, d2, d3;
if (donorIndex < NUM_DONORS) {
atoms = donorAtoms[donorIndex];
d1 = (atoms.x > -1 ? posq[atoms.x] : make_real4(0));
d2 = (atoms.y > -1 ? posq[atoms.y] : make_real4(0));
d3 = (atoms.z > -1 ? posq[atoms.z] : make_real4(0));
#ifdef USE_EXCLUSIONS
exclusionIndices = exclusions[donorIndex];
#endif
}
else
atoms = make_int4(-1, -1, -1, -1);
for (int acceptorStart = 0; acceptorStart < NUM_ACCEPTORS; acceptorStart += blockDim.x) {
// Load the next block of acceptors into local memory.
int blockSize = min((int) blockDim.x, NUM_ACCEPTORS-acceptorStart);
if (threadIdx.x < blockSize) {
int4 atoms2 = acceptorAtoms[acceptorStart+threadIdx.x];
posBuffer[3*threadIdx.x] = (atoms2.x > -1 ? posq[atoms2.x] : make_real4(0));
posBuffer[3*threadIdx.x+1] = (atoms2.y > -1 ? posq[atoms2.y] : make_real4(0));
posBuffer[3*threadIdx.x+2] = (atoms2.z > -1 ? posq[atoms2.z] : make_real4(0));
}
__syncthreads();
if (donorIndex < NUM_DONORS) {
for (int index = 0; index < blockSize; index++) {
#ifdef USE_EXCLUSIONS
int acceptorIndex = acceptorStart+index;
if (acceptorIndex == exclusionIndices.x || acceptorIndex == exclusionIndices.y || acceptorIndex == exclusionIndices.z || acceptorIndex == exclusionIndices.w)
continue;
#endif
// Compute the interaction between a donor and an acceptor.
real4 a1 = posBuffer[3*index];
real4 a2 = posBuffer[3*index+1];
real4 a3 = posBuffer[3*index+2];
real4 deltaD1A1 = deltaPeriodic(d1, a1, periodicBoxSize, invPeriodicBoxSize);
#ifdef USE_CUTOFF
if (deltaD1A1.w < CUTOFF_SQUARED) {
#endif
COMPUTE_DONOR_FORCE
#ifdef USE_CUTOFF
}
#endif
}
}
}
// Write results
if (donorIndex < NUM_DONORS) {
if (atoms.x > -1) {
atomicAdd(&force[atoms.x], static_cast<unsigned long long>((long long) (f1.x*0x100000000)));
atomicAdd(&force[atoms.x+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f1.y*0x100000000)));
atomicAdd(&force[atoms.x+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f1.z*0x100000000)));
__threadfence_block();
}
if (atoms.y > -1) {
atomicAdd(&force[atoms.y], static_cast<unsigned long long>((long long) (f2.x*0x100000000)));
atomicAdd(&force[atoms.y+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f2.y*0x100000000)));
atomicAdd(&force[atoms.y+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f2.z*0x100000000)));
__threadfence_block();
}
if (atoms.z > -1) {
atomicAdd(&force[atoms.z], static_cast<unsigned long long>((long long) (f3.x*0x100000000)));
atomicAdd(&force[atoms.z+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f3.y*0x100000000)));
atomicAdd(&force[atoms.z+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f3.z*0x100000000)));
__threadfence_block();
}
}
}
energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += energy;
}
/**
* Compute forces on acceptors.
*/
extern "C" __global__ void computeAcceptorForces(unsigned long long* __restrict__ force, real* __restrict__ energyBuffer, const real4* __restrict__ posq,
const int4* __restrict__ exclusions, const int4* __restrict__ donorAtoms, const int4* __restrict__ acceptorAtoms, real4 periodicBoxSize, real4 invPeriodicBoxSize
PARAMETER_ARGUMENTS) {
extern __shared__ real4 posBuffer[];
real3 f1 = make_real3(0);
real3 f2 = make_real3(0);
real3 f3 = make_real3(0);
for (int acceptorStart = 0; acceptorStart < NUM_ACCEPTORS; acceptorStart += blockDim.x*gridDim.x) {
// Load information about the acceptor this thread will compute forces on.
int acceptorIndex = acceptorStart+blockIdx.x*blockDim.x+threadIdx.x;
int4 atoms, exclusionIndices;
real4 a1, a2, a3;
if (acceptorIndex < NUM_ACCEPTORS) {
atoms = acceptorAtoms[acceptorIndex];
a1 = (atoms.x > -1 ? posq[atoms.x] : make_real4(0));
a2 = (atoms.y > -1 ? posq[atoms.y] : make_real4(0));
a3 = (atoms.z > -1 ? posq[atoms.z] : make_real4(0));
#ifdef USE_EXCLUSIONS
exclusionIndices = exclusions[acceptorIndex];
#endif
}
else
atoms = make_int4(-1, -1, -1, -1);
for (int donorStart = 0; donorStart < NUM_DONORS; donorStart += blockDim.x) {
// Load the next block of donors into local memory.
int blockSize = min((int) blockDim.x, NUM_DONORS-donorStart);
if (threadIdx.x < blockSize) {
int4 atoms2 = donorAtoms[donorStart+threadIdx.x];
posBuffer[3*threadIdx.x] = (atoms2.x > -1 ? posq[atoms2.x] : make_real4(0));
posBuffer[3*threadIdx.x+1] = (atoms2.y > -1 ? posq[atoms2.y] : make_real4(0));
posBuffer[3*threadIdx.x+2] = (atoms2.z > -1 ? posq[atoms2.z] : make_real4(0));
}
__syncthreads();
if (acceptorIndex < NUM_ACCEPTORS) {
for (int index = 0; index < blockSize; index++) {
#ifdef USE_EXCLUSIONS
int donorIndex = donorStart+index;
if (donorIndex == exclusionIndices.x || donorIndex == exclusionIndices.y || donorIndex == exclusionIndices.z || donorIndex == exclusionIndices.w)
continue;
#endif
// Compute the interaction between a donor and an acceptor.
real4 d1 = posBuffer[3*index];
real4 d2 = posBuffer[3*index+1];
real4 d3 = posBuffer[3*index+2];
real4 deltaD1A1 = deltaPeriodic(d1, a1, periodicBoxSize, invPeriodicBoxSize);
#ifdef USE_CUTOFF
if (deltaD1A1.w < CUTOFF_SQUARED) {
#endif
COMPUTE_ACCEPTOR_FORCE
#ifdef USE_CUTOFF
}
#endif
}
}
}
// Write results
if (acceptorIndex < NUM_ACCEPTORS) {
if (atoms.x > -1) {
atomicAdd(&force[atoms.x], static_cast<unsigned long long>((long long) (f1.x*0x100000000)));
atomicAdd(&force[atoms.x+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f1.y*0x100000000)));
atomicAdd(&force[atoms.x+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f1.z*0x100000000)));
__threadfence_block();
}
if (atoms.y > -1) {
atomicAdd(&force[atoms.y], static_cast<unsigned long long>((long long) (f2.x*0x100000000)));
atomicAdd(&force[atoms.y+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f2.y*0x100000000)));
atomicAdd(&force[atoms.y+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f2.z*0x100000000)));
__threadfence_block();
}
if (atoms.z > -1) {
atomicAdd(&force[atoms.z], static_cast<unsigned long long>((long long) (f3.x*0x100000000)));
atomicAdd(&force[atoms.z+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f3.y*0x100000000)));
atomicAdd(&force[atoms.z+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f3.z*0x100000000)));
__threadfence_block();
}
}
}
}
| 42d41454d8fd0d542ebd74264b8e8fa81b819efd.cu | /**
* Convert a real4 to a real3 by removing its last element.
*/
inline __device__ real3 trim(real4 v) {
return make_real3(v.x, v.y, v.z);
}
/**
* This does nothing, and just exists to simplify the code generation.
*/
inline __device__ real3 trim(real3 v) {
return v;
}
/**
* Compute the difference between two vectors, setting the fourth component to the squared magnitude.
*/
inline __device__ real4 delta(real4 vec1, real4 vec2) {
real4 result = make_real4(vec1.x-vec2.x, vec1.y-vec2.y, vec1.z-vec2.z, 0.0f);
result.w = result.x*result.x + result.y*result.y + result.z*result.z;
return result;
}
/**
* Compute the difference between two vectors, taking periodic boundary conditions into account
* and setting the fourth component to the squared magnitude.
*/
inline __device__ real4 deltaPeriodic(real4 vec1, real4 vec2, real4 periodicBoxSize, real4 invPeriodicBoxSize) {
real4 result = make_real4(vec1.x-vec2.x, vec1.y-vec2.y, vec1.z-vec2.z, 0.0f);
#ifdef USE_PERIODIC
result.x -= floor(result.x*invPeriodicBoxSize.x+0.5f)*periodicBoxSize.x;
result.y -= floor(result.y*invPeriodicBoxSize.y+0.5f)*periodicBoxSize.y;
result.z -= floor(result.z*invPeriodicBoxSize.z+0.5f)*periodicBoxSize.z;
#endif
result.w = result.x*result.x + result.y*result.y + result.z*result.z;
return result;
}
/**
* Compute the angle between two vectors. The w component of each vector should contain the squared magnitude.
*/
inline __device__ real computeAngle(real4 vec1, real4 vec2) {
real dotProduct = vec1.x*vec2.x + vec1.y*vec2.y + vec1.z*vec2.z;
real cosine = dotProduct*RSQRT(vec1.w*vec2.w);
real angle;
if (cosine > 0.99f || cosine < -0.99f) {
// We're close to the singularity in acos(), so take the cross product and use asin() instead.
real3 crossProduct = cross(vec1, vec2);
real scale = vec1.w*vec2.w;
angle = ASIN(SQRT(dot(crossProduct, crossProduct)/scale));
if (cosine < 0.0f)
angle = M_PI-angle;
}
else
angle = ACOS(cosine);
return angle;
}
/**
* Compute the cross product of two vectors, setting the fourth component to the squared magnitude.
*/
inline __device__ real4 computeCross(real4 vec1, real4 vec2) {
real3 result = cross(vec1, vec2);
return make_real4(result.x, result.y, result.z, result.x*result.x + result.y*result.y + result.z*result.z);
}
/**
* Compute forces on donors.
*/
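// Tiling strategy: each thread owns one donor and keeps its three atom positions in
// registers, while acceptors are staged through shared memory in blocks of blockDim.x
// (posBuffer holds three real4 positions per acceptor). Forces are accumulated into the
// global buffer as 64-bit fixed point (scaled by 2^32) via atomicAdd.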
extern "C" __global__ void computeDonorForces(unsigned long long* __restrict__ force, real* __restrict__ energyBuffer, const real4* __restrict__ posq,
const int4* __restrict__ exclusions, const int4* __restrict__ donorAtoms, const int4* __restrict__ acceptorAtoms, real4 periodicBoxSize, real4 invPeriodicBoxSize
PARAMETER_ARGUMENTS) {
extern __shared__ real4 posBuffer[];
real energy = 0;
real3 f1 = make_real3(0);
real3 f2 = make_real3(0);
real3 f3 = make_real3(0);
for (int donorStart = 0; donorStart < NUM_DONORS; donorStart += blockDim.x*gridDim.x) {
// Load information about the donor this thread will compute forces on.
int donorIndex = donorStart+blockIdx.x*blockDim.x+threadIdx.x;
int4 atoms, exclusionIndices;
real4 d1, d2, d3;
if (donorIndex < NUM_DONORS) {
atoms = donorAtoms[donorIndex];
d1 = (atoms.x > -1 ? posq[atoms.x] : make_real4(0));
d2 = (atoms.y > -1 ? posq[atoms.y] : make_real4(0));
d3 = (atoms.z > -1 ? posq[atoms.z] : make_real4(0));
#ifdef USE_EXCLUSIONS
exclusionIndices = exclusions[donorIndex];
#endif
}
else
atoms = make_int4(-1, -1, -1, -1);
for (int acceptorStart = 0; acceptorStart < NUM_ACCEPTORS; acceptorStart += blockDim.x) {
// Load the next block of acceptors into local memory.
int blockSize = min((int) blockDim.x, NUM_ACCEPTORS-acceptorStart);
if (threadIdx.x < blockSize) {
int4 atoms2 = acceptorAtoms[acceptorStart+threadIdx.x];
posBuffer[3*threadIdx.x] = (atoms2.x > -1 ? posq[atoms2.x] : make_real4(0));
posBuffer[3*threadIdx.x+1] = (atoms2.y > -1 ? posq[atoms2.y] : make_real4(0));
posBuffer[3*threadIdx.x+2] = (atoms2.z > -1 ? posq[atoms2.z] : make_real4(0));
}
__syncthreads();
if (donorIndex < NUM_DONORS) {
for (int index = 0; index < blockSize; index++) {
#ifdef USE_EXCLUSIONS
int acceptorIndex = acceptorStart+index;
if (acceptorIndex == exclusionIndices.x || acceptorIndex == exclusionIndices.y || acceptorIndex == exclusionIndices.z || acceptorIndex == exclusionIndices.w)
continue;
#endif
// Compute the interaction between a donor and an acceptor.
real4 a1 = posBuffer[3*index];
real4 a2 = posBuffer[3*index+1];
real4 a3 = posBuffer[3*index+2];
real4 deltaD1A1 = deltaPeriodic(d1, a1, periodicBoxSize, invPeriodicBoxSize);
#ifdef USE_CUTOFF
if (deltaD1A1.w < CUTOFF_SQUARED) {
#endif
COMPUTE_DONOR_FORCE
#ifdef USE_CUTOFF
}
#endif
}
}
}
// Write results
if (donorIndex < NUM_DONORS) {
if (atoms.x > -1) {
atomicAdd(&force[atoms.x], static_cast<unsigned long long>((long long) (f1.x*0x100000000)));
atomicAdd(&force[atoms.x+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f1.y*0x100000000)));
atomicAdd(&force[atoms.x+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f1.z*0x100000000)));
__threadfence_block();
}
if (atoms.y > -1) {
atomicAdd(&force[atoms.y], static_cast<unsigned long long>((long long) (f2.x*0x100000000)));
atomicAdd(&force[atoms.y+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f2.y*0x100000000)));
atomicAdd(&force[atoms.y+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f2.z*0x100000000)));
__threadfence_block();
}
if (atoms.z > -1) {
atomicAdd(&force[atoms.z], static_cast<unsigned long long>((long long) (f3.x*0x100000000)));
atomicAdd(&force[atoms.z+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f3.y*0x100000000)));
atomicAdd(&force[atoms.z+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f3.z*0x100000000)));
__threadfence_block();
}
}
}
energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += energy;
}
/**
* Compute forces on acceptors.
*/
extern "C" __global__ void computeAcceptorForces(unsigned long long* __restrict__ force, real* __restrict__ energyBuffer, const real4* __restrict__ posq,
const int4* __restrict__ exclusions, const int4* __restrict__ donorAtoms, const int4* __restrict__ acceptorAtoms, real4 periodicBoxSize, real4 invPeriodicBoxSize
PARAMETER_ARGUMENTS) {
extern __shared__ real4 posBuffer[];
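// Note: unlike computeDonorForces, this kernel does not accumulate energy (energyBuffer is unused here), so each interaction's energy is counted only once.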
real3 f1 = make_real3(0);
real3 f2 = make_real3(0);
real3 f3 = make_real3(0);
for (int acceptorStart = 0; acceptorStart < NUM_ACCEPTORS; acceptorStart += blockDim.x*gridDim.x) {
// Load information about the acceptor this thread will compute forces on.
int acceptorIndex = acceptorStart+blockIdx.x*blockDim.x+threadIdx.x;
int4 atoms, exclusionIndices;
real4 a1, a2, a3;
if (acceptorIndex < NUM_ACCEPTORS) {
atoms = acceptorAtoms[acceptorIndex];
a1 = (atoms.x > -1 ? posq[atoms.x] : make_real4(0));
a2 = (atoms.y > -1 ? posq[atoms.y] : make_real4(0));
a3 = (atoms.z > -1 ? posq[atoms.z] : make_real4(0));
#ifdef USE_EXCLUSIONS
exclusionIndices = exclusions[acceptorIndex];
#endif
}
else
atoms = make_int4(-1, -1, -1, -1);
for (int donorStart = 0; donorStart < NUM_DONORS; donorStart += blockDim.x) {
// Load the next block of donors into local memory.
int blockSize = min((int) blockDim.x, NUM_DONORS-donorStart);
if (threadIdx.x < blockSize) {
int4 atoms2 = donorAtoms[donorStart+threadIdx.x];
posBuffer[3*threadIdx.x] = (atoms2.x > -1 ? posq[atoms2.x] : make_real4(0));
posBuffer[3*threadIdx.x+1] = (atoms2.y > -1 ? posq[atoms2.y] : make_real4(0));
posBuffer[3*threadIdx.x+2] = (atoms2.z > -1 ? posq[atoms2.z] : make_real4(0));
}
__syncthreads();
if (acceptorIndex < NUM_ACCEPTORS) {
for (int index = 0; index < blockSize; index++) {
#ifdef USE_EXCLUSIONS
int donorIndex = donorStart+index;
if (donorIndex == exclusionIndices.x || donorIndex == exclusionIndices.y || donorIndex == exclusionIndices.z || donorIndex == exclusionIndices.w)
continue;
#endif
// Compute the interaction between a donor and an acceptor.
real4 d1 = posBuffer[3*index];
real4 d2 = posBuffer[3*index+1];
real4 d3 = posBuffer[3*index+2];
real4 deltaD1A1 = deltaPeriodic(d1, a1, periodicBoxSize, invPeriodicBoxSize);
#ifdef USE_CUTOFF
if (deltaD1A1.w < CUTOFF_SQUARED) {
#endif
COMPUTE_ACCEPTOR_FORCE
#ifdef USE_CUTOFF
}
#endif
}
}
}
// Write results
if (acceptorIndex < NUM_ACCEPTORS) {
if (atoms.x > -1) {
atomicAdd(&force[atoms.x], static_cast<unsigned long long>((long long) (f1.x*0x100000000)));
atomicAdd(&force[atoms.x+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f1.y*0x100000000)));
atomicAdd(&force[atoms.x+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f1.z*0x100000000)));
__threadfence_block();
}
if (atoms.y > -1) {
atomicAdd(&force[atoms.y], static_cast<unsigned long long>((long long) (f2.x*0x100000000)));
atomicAdd(&force[atoms.y+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f2.y*0x100000000)));
atomicAdd(&force[atoms.y+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f2.z*0x100000000)));
__threadfence_block();
}
if (atoms.z > -1) {
atomicAdd(&force[atoms.z], static_cast<unsigned long long>((long long) (f3.x*0x100000000)));
atomicAdd(&force[atoms.z+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f3.y*0x100000000)));
atomicAdd(&force[atoms.z+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (f3.z*0x100000000)));
__threadfence_block();
}
}
}
}
|
1d1b43ee4d5e10d75602b5a1ea53d3cc3500e2e3.hip | // !!! This is a file automatically generated by hipify!!!
#include <gtest/gtest.h>
#include "test_quantile.h"
#include "../helpers.h"
#include "../../../src/common/hist_util.cuh"
#include "../../../src/common/quantile.cuh"
namespace xgboost {
namespace {
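// Strict ordering on sketch entry values; used with is_sorted to verify per-column ordering of the sketch data.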
struct IsSorted {
XGBOOST_DEVICE bool operator()(common::SketchEntry const& a, common::SketchEntry const& b) const {
return a.value < b.value;
}
};
}
namespace common {
TEST(GPUQuantile, Basic) {
constexpr size_t kRows = 1000, kCols = 100, kBins = 256;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch(ft, kBins, kCols, kRows, 0);
dh::caching_device_vector<Entry> entries;
dh::device_vector<bst_row_t> cuts_ptr(kCols+1);
thrust::fill(cuts_ptr.begin(), cuts_ptr.end(), 0);
// Push empty
sketch.Push(dh::ToSpan(entries), dh::ToSpan(cuts_ptr), dh::ToSpan(cuts_ptr), 0);
ASSERT_EQ(sketch.Data().size(), 0);
}
void TestSketchUnique(float sparsity) {
constexpr size_t kRows = 1000, kCols = 100;
RunWithSeedsAndBins(kRows, [kRows, kCols, sparsity](int32_t seed, size_t n_bins, MetaInfo const& info) {
HostDeviceVector<FeatureType> ft;
SketchContainer sketch(ft, n_bins, kCols, kRows, 0);
HostDeviceVector<float> storage;
std::string interface_str = RandomDataGenerator{kRows, kCols, sparsity}
.Seed(seed)
.Device(0)
.GenerateArrayInterface(&storage);
data::CupyAdapter adapter(interface_str);
AdapterDeviceSketch(adapter.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(), &sketch);
auto n_cuts = detail::RequiredSampleCutsPerColumn(n_bins, kRows);
dh::caching_device_vector<size_t> column_sizes_scan;
HostDeviceVector<size_t> cut_sizes_scan;
auto batch = adapter.Value();
data::IsValidFunctor is_valid(std::numeric_limits<float>::quiet_NaN());
auto batch_iter = dh::MakeTransformIterator<data::COOTuple>(
thrust::make_counting_iterator(0llu),
[=] __device__(size_t idx) { return batch.GetElement(idx); });
auto end = kCols * kRows;
detail::GetColumnSizesScan(0, kCols, n_cuts, batch_iter, is_valid, 0, end,
&cut_sizes_scan, &column_sizes_scan);
auto const& cut_sizes = cut_sizes_scan.HostVector();
ASSERT_LE(sketch.Data().size(), cut_sizes.back());
std::vector<size_t> h_columns_ptr(sketch.ColumnsPtr().size());
dh::CopyDeviceSpanToVector(&h_columns_ptr, sketch.ColumnsPtr());
ASSERT_EQ(sketch.Data().size(), h_columns_ptr.back());
sketch.Unique();
std::vector<SketchEntry> h_data(sketch.Data().size());
thrust::copy(dh::tcbegin(sketch.Data()), dh::tcend(sketch.Data()), h_data.begin());
for (size_t i = 1; i < h_columns_ptr.size(); ++i) {
auto begin = h_columns_ptr[i - 1];
auto column = common::Span<SketchEntry>(h_data).subspan(begin, h_columns_ptr[i] - begin);
ASSERT_TRUE(std::is_sorted(column.begin(), column.end(), IsSorted{}));
}
});
}
TEST(GPUQuantile, Unique) {
TestSketchUnique(0);
TestSketchUnique(0.5);
}
// if with_error is true, the test tolerates floating point error
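// Checks the per-column sketch invariants: rmin and rmax are non-decreasing, and each entry's rmax is at least its RMinNext().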
void TestQuantileElemRank(int32_t device, Span<SketchEntry const> in,
Span<bst_row_t const> d_columns_ptr, bool with_error = false) {
dh::safe_cuda(hipSetDevice(device));
std::vector<SketchEntry> h_in(in.size());
dh::CopyDeviceSpanToVector(&h_in, in);
std::vector<bst_row_t> h_columns_ptr(d_columns_ptr.size());
dh::CopyDeviceSpanToVector(&h_columns_ptr, d_columns_ptr);
for (size_t i = 1; i < d_columns_ptr.size(); ++i) {
auto column_id = i - 1;
auto beg = h_columns_ptr[column_id];
auto end = h_columns_ptr[i];
auto in_column = Span<SketchEntry>{h_in}.subspan(beg, end - beg);
for (size_t idx = 1; idx < in_column.size(); ++idx) {
float prev_rmin = in_column[idx - 1].rmin;
float prev_rmax = in_column[idx - 1].rmax;
float rmin_next = in_column[idx].RMinNext();
if (with_error) {
ASSERT_GE(in_column[idx].rmin + in_column[idx].rmin * kRtEps,
prev_rmin);
ASSERT_GE(in_column[idx].rmax + in_column[idx].rmin * kRtEps, prev_rmax);
ASSERT_GE(in_column[idx].rmax + in_column[idx].rmin * kRtEps,
rmin_next);
} else {
ASSERT_GE(in_column[idx].rmin, prev_rmin);
ASSERT_GE(in_column[idx].rmax, prev_rmax);
ASSERT_GE(in_column[idx].rmax, rmin_next);
}
}
}
}
TEST(GPUQuantile, Prune) {
constexpr size_t kRows = 1000, kCols = 100;
RunWithSeedsAndBins(kRows, [=](int32_t seed, size_t n_bins, MetaInfo const& info) {
HostDeviceVector<FeatureType> ft;
SketchContainer sketch(ft, n_bins, kCols, kRows, 0);
HostDeviceVector<float> storage;
std::string interface_str = RandomDataGenerator{kRows, kCols, 0}
.Device(0)
.Seed(seed)
.GenerateArrayInterface(&storage);
data::CupyAdapter adapter(interface_str);
AdapterDeviceSketch(adapter.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(), &sketch);
auto n_cuts = detail::RequiredSampleCutsPerColumn(n_bins, kRows);
// LE because kRows * kCols entries are pushed into the sketch; after removing
// duplicated entries we might not have that many inputs left for prune.
ASSERT_LE(sketch.Data().size(), n_cuts * kCols);
sketch.Prune(n_bins);
ASSERT_LE(sketch.Data().size(), kRows * kCols);
// This is not necessarily true for all inputs without calling unique after
// prune.
ASSERT_TRUE(thrust::is_sorted(thrust::device, sketch.Data().data(),
sketch.Data().data() + sketch.Data().size(),
detail::SketchUnique{}));
TestQuantileElemRank(0, sketch.Data(), sketch.ColumnsPtr());
});
}
TEST(GPUQuantile, MergeEmpty) {
constexpr size_t kRows = 1000, kCols = 100;
size_t n_bins = 10;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_0(ft, n_bins, kCols, kRows, 0);
HostDeviceVector<float> storage_0;
std::string interface_str_0 =
RandomDataGenerator{kRows, kCols, 0}.Device(0).GenerateArrayInterface(
&storage_0);
data::CupyAdapter adapter_0(interface_str_0);
MetaInfo info;
AdapterDeviceSketch(adapter_0.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(), &sketch_0);
std::vector<SketchEntry> entries_before(sketch_0.Data().size());
dh::CopyDeviceSpanToVector(&entries_before, sketch_0.Data());
std::vector<bst_row_t> ptrs_before(sketch_0.ColumnsPtr().size());
dh::CopyDeviceSpanToVector(&ptrs_before, sketch_0.ColumnsPtr());
thrust::device_vector<size_t> columns_ptr(kCols + 1);
// Merge an empty sketch
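// Merging an empty span must leave both the entries and the column pointers unchanged; the checks below verify this element by element.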
sketch_0.Merge(dh::ToSpan(columns_ptr), Span<SketchEntry>{});
std::vector<SketchEntry> entries_after(sketch_0.Data().size());
dh::CopyDeviceSpanToVector(&entries_after, sketch_0.Data());
std::vector<bst_row_t> ptrs_after(sketch_0.ColumnsPtr().size());
dh::CopyDeviceSpanToVector(&ptrs_after, sketch_0.ColumnsPtr());
CHECK_EQ(entries_before.size(), entries_after.size());
CHECK_EQ(ptrs_before.size(), ptrs_after.size());
for (size_t i = 0; i < entries_before.size(); ++i) {
CHECK_EQ(entries_before[i].value, entries_after[i].value);
CHECK_EQ(entries_before[i].rmin, entries_after[i].rmin);
CHECK_EQ(entries_before[i].rmax, entries_after[i].rmax);
CHECK_EQ(entries_before[i].wmin, entries_after[i].wmin);
}
for (size_t i = 0; i < ptrs_before.size(); ++i) {
CHECK_EQ(ptrs_before[i], ptrs_after[i]);
}
}
TEST(GPUQuantile, MergeBasic) {
constexpr size_t kRows = 1000, kCols = 100;
RunWithSeedsAndBins(kRows, [=](int32_t seed, size_t n_bins, MetaInfo const &info) {
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_0(ft, n_bins, kCols, kRows, 0);
HostDeviceVector<float> storage_0;
std::string interface_str_0 = RandomDataGenerator{kRows, kCols, 0}
.Device(0)
.Seed(seed)
.GenerateArrayInterface(&storage_0);
data::CupyAdapter adapter_0(interface_str_0);
AdapterDeviceSketch(adapter_0.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(), &sketch_0);
SketchContainer sketch_1(ft, n_bins, kCols, kRows * kRows, 0);
HostDeviceVector<float> storage_1;
std::string interface_str_1 = RandomDataGenerator{kRows, kCols, 0}
.Device(0)
.Seed(seed)
.GenerateArrayInterface(&storage_1);
data::CupyAdapter adapter_1(interface_str_1);
AdapterDeviceSketch(adapter_1.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(), &sketch_1);
size_t size_before_merge = sketch_0.Data().size();
sketch_0.Merge(sketch_1.ColumnsPtr(), sketch_1.Data());
if (info.weights_.Size() != 0) {
TestQuantileElemRank(0, sketch_0.Data(), sketch_0.ColumnsPtr(), true);
sketch_0.FixError();
TestQuantileElemRank(0, sketch_0.Data(), sketch_0.ColumnsPtr(), false);
} else {
TestQuantileElemRank(0, sketch_0.Data(), sketch_0.ColumnsPtr());
}
auto columns_ptr = sketch_0.ColumnsPtr();
std::vector<bst_row_t> h_columns_ptr(columns_ptr.size());
dh::CopyDeviceSpanToVector(&h_columns_ptr, columns_ptr);
ASSERT_EQ(h_columns_ptr.back(), sketch_1.Data().size() + size_before_merge);
sketch_0.Unique();
ASSERT_TRUE(
thrust::is_sorted(thrust::device, sketch_0.Data().data(),
sketch_0.Data().data() + sketch_0.Data().size(),
detail::SketchUnique{}));
});
}
void TestMergeDuplicated(int32_t n_bins, size_t cols, size_t rows, float frac) {
MetaInfo info;
int32_t seed = 0;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_0(ft, n_bins, cols, rows, 0);
HostDeviceVector<float> storage_0;
std::string interface_str_0 = RandomDataGenerator{rows, cols, 0}
.Device(0)
.Seed(seed)
.GenerateArrayInterface(&storage_0);
data::CupyAdapter adapter_0(interface_str_0);
AdapterDeviceSketch(adapter_0.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(),
&sketch_0);
size_t f_rows = rows * frac;
SketchContainer sketch_1(ft, n_bins, cols, f_rows, 0);
HostDeviceVector<float> storage_1;
std::string interface_str_1 = RandomDataGenerator{f_rows, cols, 0}
.Device(0)
.Seed(seed)
.GenerateArrayInterface(&storage_1);
auto data_1 = storage_1.DeviceSpan();
auto tuple_it = thrust::make_tuple(
thrust::make_counting_iterator<size_t>(0ul), data_1.data());
using Tuple = thrust::tuple<size_t, float>;
auto it = thrust::make_zip_iterator(tuple_it);
thrust::transform(thrust::device, it, it + data_1.size(), data_1.data(),
[=] __device__(Tuple const &tuple) {
auto i = thrust::get<0>(tuple);
if (thrust::get<0>(tuple) % 2 == 0) {
return 0.0f;
} else {
return thrust::get<1>(tuple);
}
});
data::CupyAdapter adapter_1(interface_str_1);
AdapterDeviceSketch(adapter_1.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(),
&sketch_1);
size_t size_before_merge = sketch_0.Data().size();
sketch_0.Merge(sketch_1.ColumnsPtr(), sketch_1.Data());
TestQuantileElemRank(0, sketch_0.Data(), sketch_0.ColumnsPtr());
auto columns_ptr = sketch_0.ColumnsPtr();
std::vector<bst_row_t> h_columns_ptr(columns_ptr.size());
dh::CopyDeviceSpanToVector(&h_columns_ptr, columns_ptr);
ASSERT_EQ(h_columns_ptr.back(), sketch_1.Data().size() + size_before_merge);
sketch_0.Unique();
columns_ptr = sketch_0.ColumnsPtr();
dh::CopyDeviceSpanToVector(&h_columns_ptr, columns_ptr);
std::vector<SketchEntry> h_data(sketch_0.Data().size());
dh::CopyDeviceSpanToVector(&h_data, sketch_0.Data());
for (size_t i = 1; i < h_columns_ptr.size(); ++i) {
auto begin = h_columns_ptr[i - 1];
auto column = Span<SketchEntry> {h_data}.subspan(begin, h_columns_ptr[i] - begin);
ASSERT_TRUE(std::is_sorted(column.begin(), column.end(), IsSorted{}));
}
}
TEST(GPUQuantile, MergeDuplicated) {
size_t n_bins = 256;
constexpr size_t kRows = 1000, kCols = 100;
for (float frac = 0.5; frac < 2.5; frac += 0.5) {
TestMergeDuplicated(n_bins, kRows, kCols, frac);
}
}
TEST(GPUQuantile, MultiMerge) {
constexpr size_t kRows = 20, kCols = 1;
int32_t world = 2;
RunWithSeedsAndBins(kRows, [=](int32_t seed, size_t n_bins,
MetaInfo const &info) {
// Set up single node version
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_on_single_node(ft, n_bins, kCols, kRows, 0);
size_t intermediate_num_cuts = ::min(
kRows * world, static_cast<size_t>(n_bins * WQSketch::kFactor));
std::vector<SketchContainer> containers;
for (auto rank = 0; rank < world; ++rank) {
HostDeviceVector<float> storage;
std::string interface_str = RandomDataGenerator{kRows, kCols, 0}
.Device(0)
.Seed(rank + seed)
.GenerateArrayInterface(&storage);
data::CupyAdapter adapter(interface_str);
HostDeviceVector<FeatureType> ft;
containers.emplace_back(ft, n_bins, kCols, kRows, 0);
AdapterDeviceSketch(adapter.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(),
&containers.back());
}
for (auto &sketch : containers) {
sketch.Prune(intermediate_num_cuts);
sketch_on_single_node.Merge(sketch.ColumnsPtr(), sketch.Data());
sketch_on_single_node.FixError();
}
TestQuantileElemRank(0, sketch_on_single_node.Data(),
sketch_on_single_node.ColumnsPtr());
sketch_on_single_node.Unique();
TestQuantileElemRank(0, sketch_on_single_node.Data(),
sketch_on_single_node.ColumnsPtr());
});
}
TEST(GPUQuantile, AllReduceBasic) {
// This test is supposed to be run by a Python test that sets up the environment.
std::string msg {"Skipping AllReduce test"};
auto n_gpus = AllVisibleGPUs();
InitRabitContext(msg, n_gpus);
auto world = rabit::GetWorldSize();
if (world != 1) {
ASSERT_EQ(world, n_gpus);
} else {
return;
}
constexpr size_t kRows = 1000, kCols = 100;
RunWithSeedsAndBins(kRows, [=](int32_t seed, size_t n_bins, MetaInfo const& info) {
// Set up single node version;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_on_single_node(ft, n_bins, kCols, kRows, 0);
size_t intermediate_num_cuts = ::min(
kRows * world, static_cast<size_t>(n_bins * WQSketch::kFactor));
std::vector<SketchContainer> containers;
for (auto rank = 0; rank < world; ++rank) {
HostDeviceVector<float> storage;
std::string interface_str = RandomDataGenerator{kRows, kCols, 0}
.Device(0)
.Seed(rank + seed)
.GenerateArrayInterface(&storage);
data::CupyAdapter adapter(interface_str);
HostDeviceVector<FeatureType> ft;
containers.emplace_back(ft, n_bins, kCols, kRows, 0);
AdapterDeviceSketch(adapter.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(),
&containers.back());
}
for (auto &sketch : containers) {
sketch.Prune(intermediate_num_cuts);
sketch_on_single_node.Merge(sketch.ColumnsPtr(), sketch.Data());
sketch_on_single_node.FixError();
}
sketch_on_single_node.Unique();
TestQuantileElemRank(0, sketch_on_single_node.Data(),
sketch_on_single_node.ColumnsPtr());
// Set up distributed version. We rely on using rank as seed to generate
// the exact same copy of data.
auto rank = rabit::GetRank();
SketchContainer sketch_distributed(ft, n_bins, kCols, kRows, 0);
HostDeviceVector<float> storage;
std::string interface_str = RandomDataGenerator{kRows, kCols, 0}
.Device(0)
.Seed(rank + seed)
.GenerateArrayInterface(&storage);
data::CupyAdapter adapter(interface_str);
AdapterDeviceSketch(adapter.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(),
&sketch_distributed);
sketch_distributed.AllReduce();
sketch_distributed.Unique();
ASSERT_EQ(sketch_distributed.ColumnsPtr().size(),
sketch_on_single_node.ColumnsPtr().size());
ASSERT_EQ(sketch_distributed.Data().size(),
sketch_on_single_node.Data().size());
TestQuantileElemRank(0, sketch_distributed.Data(),
sketch_distributed.ColumnsPtr());
std::vector<SketchEntry> single_node_data(
sketch_on_single_node.Data().size());
dh::CopyDeviceSpanToVector(&single_node_data, sketch_on_single_node.Data());
std::vector<SketchEntry> distributed_data(sketch_distributed.Data().size());
dh::CopyDeviceSpanToVector(&distributed_data, sketch_distributed.Data());
float Eps = 2e-4 * world;
for (size_t i = 0; i < single_node_data.size(); ++i) {
ASSERT_NEAR(single_node_data[i].value, distributed_data[i].value, Eps);
ASSERT_NEAR(single_node_data[i].rmax, distributed_data[i].rmax, Eps);
ASSERT_NEAR(single_node_data[i].rmin, distributed_data[i].rmin, Eps);
ASSERT_NEAR(single_node_data[i].wmin, distributed_data[i].wmin, Eps);
}
});
rabit::Finalize();
}
TEST(GPUQuantile, SameOnAllWorkers) {
std::string msg {"Skipping SameOnAllWorkers test"};
auto n_gpus = AllVisibleGPUs();
InitRabitContext(msg, n_gpus);
auto world = rabit::GetWorldSize();
if (world != 1) {
ASSERT_EQ(world, n_gpus);
} else {
return;
}
constexpr size_t kRows = 1000, kCols = 100;
RunWithSeedsAndBins(kRows, [=](int32_t seed, size_t n_bins,
MetaInfo const &info) {
auto rank = rabit::GetRank();
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_distributed(ft, n_bins, kCols, kRows, 0);
HostDeviceVector<float> storage;
std::string interface_str = RandomDataGenerator{kRows, kCols, 0}
.Device(0)
.Seed(rank + seed)
.GenerateArrayInterface(&storage);
data::CupyAdapter adapter(interface_str);
AdapterDeviceSketch(adapter.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(),
&sketch_distributed);
sketch_distributed.AllReduce();
sketch_distributed.Unique();
TestQuantileElemRank(0, sketch_distributed.Data(), sketch_distributed.ColumnsPtr());
// Test for all workers having the same sketch.
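// Each worker writes its sketch bytes (reinterpreted as floats) into its own slot of a world-sized buffer, an allreduce-sum fills in every slot, and each slot is then compared against the rank-0 baseline.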
size_t n_data = sketch_distributed.Data().size();
rabit::Allreduce<rabit::op::Max>(&n_data, 1);
ASSERT_EQ(n_data, sketch_distributed.Data().size());
size_t size_as_float =
sketch_distributed.Data().size_bytes() / sizeof(float);
auto local_data = Span<float const>{
reinterpret_cast<float const *>(sketch_distributed.Data().data()),
size_as_float};
dh::caching_device_vector<float> all_workers(size_as_float * world);
thrust::fill(all_workers.begin(), all_workers.end(), 0);
thrust::copy(thrust::device, local_data.data(),
local_data.data() + local_data.size(),
all_workers.begin() + local_data.size() * rank);
dh::AllReducer reducer;
reducer.Init(0);
reducer.AllReduceSum(all_workers.data().get(), all_workers.data().get(),
all_workers.size());
reducer.Synchronize();
auto base_line = dh::ToSpan(all_workers).subspan(0, size_as_float);
std::vector<float> h_base_line(base_line.size());
dh::CopyDeviceSpanToVector(&h_base_line, base_line);
size_t offset = 0;
for (decltype(world) i = 0; i < world; ++i) {
auto comp = dh::ToSpan(all_workers).subspan(offset, size_as_float);
std::vector<float> h_comp(comp.size());
dh::CopyDeviceSpanToVector(&h_comp, comp);
ASSERT_EQ(comp.size(), base_line.size());
for (size_t j = 0; j < h_comp.size(); ++j) {
ASSERT_NEAR(h_base_line[j], h_comp[j], kRtEps);
}
offset += size_as_float;
}
});
}
TEST(GPUQuantile, Push) {
size_t constexpr kRows = 100;
std::vector<float> data(kRows);
std::fill(data.begin(), data.begin() + (data.size() / 2), 0.3f);
std::fill(data.begin() + (data.size() / 2), data.end(), 0.5f);
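// Only two distinct values (0.3 and 0.5), each covering half the rows, so the sketch should reduce to exactly two entries.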
int32_t n_bins = 128;
bst_feature_t constexpr kCols = 1;
std::vector<Entry> entries(kRows);
for (bst_feature_t i = 0; i < entries.size(); ++i) {
Entry e{i, data[i]};
entries[i] = e;
}
dh::device_vector<Entry> d_entries(entries);
dh::device_vector<size_t> columns_ptr(2);
columns_ptr[0] = 0;
columns_ptr[1] = kRows;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch(ft, n_bins, kCols, kRows, 0);
sketch.Push(dh::ToSpan(d_entries), dh::ToSpan(columns_ptr), dh::ToSpan(columns_ptr), kRows, {});
auto sketch_data = sketch.Data();
thrust::host_vector<SketchEntry> h_sketch_data(sketch_data.size());
auto ptr = thrust::device_ptr<SketchEntry const>(sketch_data.data());
thrust::copy(ptr, ptr + sketch_data.size(), h_sketch_data.begin());
ASSERT_EQ(h_sketch_data.size(), 2);
auto v_0 = h_sketch_data[0];
ASSERT_EQ(v_0.rmin, 0);
ASSERT_EQ(v_0.wmin, kRows / 2.0f);
ASSERT_EQ(v_0.rmax, kRows / 2.0f);
auto v_1 = h_sketch_data[1];
ASSERT_EQ(v_1.rmin, kRows / 2.0f);
ASSERT_EQ(v_1.wmin, kRows / 2.0f);
ASSERT_EQ(v_1.rmax, static_cast<float>(kRows));
}
TEST(GPUQuantile, MultiColPush) {
size_t constexpr kRows = 100, kCols = 4;
std::vector<float> data(kRows * kCols);
std::fill(data.begin(), data.begin() + (data.size() / 2), 0.3f);
std::vector<Entry> entries(kRows * kCols);
for (bst_feature_t c = 0; c < kCols; ++c) {
for (size_t r = 0; r < kRows; ++r) {
float v = (r >= kRows / 2) ? 0.7 : 0.4;
auto e = Entry{c, v};
entries[c * kRows + r] = e;
}
}
int32_t n_bins = 16;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch(ft, n_bins, kCols, kRows, 0);
dh::device_vector<Entry> d_entries {entries};
dh::device_vector<size_t> columns_ptr(kCols + 1, 0);
for (size_t i = 1; i < kCols + 1; ++i) {
columns_ptr[i] = kRows;
}
thrust::inclusive_scan(thrust::device, columns_ptr.begin(), columns_ptr.end(),
columns_ptr.begin());
dh::device_vector<size_t> cuts_ptr(columns_ptr);
sketch.Push(dh::ToSpan(d_entries), dh::ToSpan(columns_ptr),
dh::ToSpan(cuts_ptr), kRows * kCols, {});
auto sketch_data = sketch.Data();
ASSERT_EQ(sketch_data.size(), kCols * 2);
auto ptr = thrust::device_ptr<SketchEntry const>(sketch_data.data());
std::vector<SketchEntry> h_sketch_data(sketch_data.size());
thrust::copy(ptr, ptr + sketch_data.size(), h_sketch_data.begin());
for (size_t i = 0; i < kCols; ++i) {
auto v_0 = h_sketch_data[i * 2];
ASSERT_EQ(v_0.rmin, 0);
ASSERT_EQ(v_0.wmin, kRows / 2.0f);
ASSERT_EQ(v_0.rmax, kRows / 2.0f);
auto v_1 = h_sketch_data[i * 2 + 1];
ASSERT_EQ(v_1.rmin, kRows / 2.0f);
ASSERT_EQ(v_1.wmin, kRows / 2.0f);
ASSERT_EQ(v_1.rmax, static_cast<float>(kRows));
}
}
} // namespace common
} // namespace xgboost
| 1d1b43ee4d5e10d75602b5a1ea53d3cc3500e2e3.cu | #include <gtest/gtest.h>
#include "test_quantile.h"
#include "../helpers.h"
#include "../../../src/common/hist_util.cuh"
#include "../../../src/common/quantile.cuh"
namespace xgboost {
namespace {
struct IsSorted {
XGBOOST_DEVICE bool operator()(common::SketchEntry const& a, common::SketchEntry const& b) const {
return a.value < b.value;
}
};
}
namespace common {
TEST(GPUQuantile, Basic) {
constexpr size_t kRows = 1000, kCols = 100, kBins = 256;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch(ft, kBins, kCols, kRows, 0);
dh::caching_device_vector<Entry> entries;
dh::device_vector<bst_row_t> cuts_ptr(kCols+1);
thrust::fill(cuts_ptr.begin(), cuts_ptr.end(), 0);
// Push empty
sketch.Push(dh::ToSpan(entries), dh::ToSpan(cuts_ptr), dh::ToSpan(cuts_ptr), 0);
ASSERT_EQ(sketch.Data().size(), 0);
}
void TestSketchUnique(float sparsity) {
constexpr size_t kRows = 1000, kCols = 100;
RunWithSeedsAndBins(kRows, [kRows, kCols, sparsity](int32_t seed, size_t n_bins, MetaInfo const& info) {
HostDeviceVector<FeatureType> ft;
SketchContainer sketch(ft, n_bins, kCols, kRows, 0);
HostDeviceVector<float> storage;
std::string interface_str = RandomDataGenerator{kRows, kCols, sparsity}
.Seed(seed)
.Device(0)
.GenerateArrayInterface(&storage);
data::CupyAdapter adapter(interface_str);
AdapterDeviceSketch(adapter.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(), &sketch);
auto n_cuts = detail::RequiredSampleCutsPerColumn(n_bins, kRows);
dh::caching_device_vector<size_t> column_sizes_scan;
HostDeviceVector<size_t> cut_sizes_scan;
auto batch = adapter.Value();
data::IsValidFunctor is_valid(std::numeric_limits<float>::quiet_NaN());
auto batch_iter = dh::MakeTransformIterator<data::COOTuple>(
thrust::make_counting_iterator(0llu),
[=] __device__(size_t idx) { return batch.GetElement(idx); });
auto end = kCols * kRows;
detail::GetColumnSizesScan(0, kCols, n_cuts, batch_iter, is_valid, 0, end,
&cut_sizes_scan, &column_sizes_scan);
auto const& cut_sizes = cut_sizes_scan.HostVector();
ASSERT_LE(sketch.Data().size(), cut_sizes.back());
std::vector<size_t> h_columns_ptr(sketch.ColumnsPtr().size());
dh::CopyDeviceSpanToVector(&h_columns_ptr, sketch.ColumnsPtr());
ASSERT_EQ(sketch.Data().size(), h_columns_ptr.back());
sketch.Unique();
std::vector<SketchEntry> h_data(sketch.Data().size());
thrust::copy(dh::tcbegin(sketch.Data()), dh::tcend(sketch.Data()), h_data.begin());
for (size_t i = 1; i < h_columns_ptr.size(); ++i) {
auto begin = h_columns_ptr[i - 1];
auto column = common::Span<SketchEntry>(h_data).subspan(begin, h_columns_ptr[i] - begin);
ASSERT_TRUE(std::is_sorted(column.begin(), column.end(), IsSorted{}));
}
});
}
TEST(GPUQuantile, Unique) {
TestSketchUnique(0);
TestSketchUnique(0.5);
}
// if with_error is true, the test tolerates floating point error
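// Checks the per-column sketch invariants: rmin and rmax are non-decreasing, and each entry's rmax is at least its RMinNext().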
void TestQuantileElemRank(int32_t device, Span<SketchEntry const> in,
Span<bst_row_t const> d_columns_ptr, bool with_error = false) {
dh::safe_cuda(cudaSetDevice(device));
std::vector<SketchEntry> h_in(in.size());
dh::CopyDeviceSpanToVector(&h_in, in);
std::vector<bst_row_t> h_columns_ptr(d_columns_ptr.size());
dh::CopyDeviceSpanToVector(&h_columns_ptr, d_columns_ptr);
for (size_t i = 1; i < d_columns_ptr.size(); ++i) {
auto column_id = i - 1;
auto beg = h_columns_ptr[column_id];
auto end = h_columns_ptr[i];
auto in_column = Span<SketchEntry>{h_in}.subspan(beg, end - beg);
for (size_t idx = 1; idx < in_column.size(); ++idx) {
float prev_rmin = in_column[idx - 1].rmin;
float prev_rmax = in_column[idx - 1].rmax;
float rmin_next = in_column[idx].RMinNext();
if (with_error) {
ASSERT_GE(in_column[idx].rmin + in_column[idx].rmin * kRtEps,
prev_rmin);
ASSERT_GE(in_column[idx].rmax + in_column[idx].rmin * kRtEps, prev_rmax);
ASSERT_GE(in_column[idx].rmax + in_column[idx].rmin * kRtEps,
rmin_next);
} else {
ASSERT_GE(in_column[idx].rmin, prev_rmin);
ASSERT_GE(in_column[idx].rmax, prev_rmax);
ASSERT_GE(in_column[idx].rmax, rmin_next);
}
}
}
}
TEST(GPUQuantile, Prune) {
constexpr size_t kRows = 1000, kCols = 100;
RunWithSeedsAndBins(kRows, [=](int32_t seed, size_t n_bins, MetaInfo const& info) {
HostDeviceVector<FeatureType> ft;
SketchContainer sketch(ft, n_bins, kCols, kRows, 0);
HostDeviceVector<float> storage;
std::string interface_str = RandomDataGenerator{kRows, kCols, 0}
.Device(0)
.Seed(seed)
.GenerateArrayInterface(&storage);
data::CupyAdapter adapter(interface_str);
AdapterDeviceSketch(adapter.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(), &sketch);
auto n_cuts = detail::RequiredSampleCutsPerColumn(n_bins, kRows);
// LE because kRows * kCols entries are pushed into the sketch; after removing
// duplicated entries we might not have that many inputs left for prune.
ASSERT_LE(sketch.Data().size(), n_cuts * kCols);
sketch.Prune(n_bins);
ASSERT_LE(sketch.Data().size(), kRows * kCols);
// This is not necessarily true for all inputs without calling unique after
// prune.
ASSERT_TRUE(thrust::is_sorted(thrust::device, sketch.Data().data(),
sketch.Data().data() + sketch.Data().size(),
detail::SketchUnique{}));
TestQuantileElemRank(0, sketch.Data(), sketch.ColumnsPtr());
});
}
TEST(GPUQuantile, MergeEmpty) {
constexpr size_t kRows = 1000, kCols = 100;
size_t n_bins = 10;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_0(ft, n_bins, kCols, kRows, 0);
HostDeviceVector<float> storage_0;
std::string interface_str_0 =
RandomDataGenerator{kRows, kCols, 0}.Device(0).GenerateArrayInterface(
&storage_0);
data::CupyAdapter adapter_0(interface_str_0);
MetaInfo info;
AdapterDeviceSketch(adapter_0.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(), &sketch_0);
std::vector<SketchEntry> entries_before(sketch_0.Data().size());
dh::CopyDeviceSpanToVector(&entries_before, sketch_0.Data());
std::vector<bst_row_t> ptrs_before(sketch_0.ColumnsPtr().size());
dh::CopyDeviceSpanToVector(&ptrs_before, sketch_0.ColumnsPtr());
thrust::device_vector<size_t> columns_ptr(kCols + 1);
// Merge an empty sketch
sketch_0.Merge(dh::ToSpan(columns_ptr), Span<SketchEntry>{});
std::vector<SketchEntry> entries_after(sketch_0.Data().size());
dh::CopyDeviceSpanToVector(&entries_after, sketch_0.Data());
std::vector<bst_row_t> ptrs_after(sketch_0.ColumnsPtr().size());
dh::CopyDeviceSpanToVector(&ptrs_after, sketch_0.ColumnsPtr());
CHECK_EQ(entries_before.size(), entries_after.size());
CHECK_EQ(ptrs_before.size(), ptrs_after.size());
for (size_t i = 0; i < entries_before.size(); ++i) {
CHECK_EQ(entries_before[i].value, entries_after[i].value);
CHECK_EQ(entries_before[i].rmin, entries_after[i].rmin);
CHECK_EQ(entries_before[i].rmax, entries_after[i].rmax);
CHECK_EQ(entries_before[i].wmin, entries_after[i].wmin);
}
for (size_t i = 0; i < ptrs_before.size(); ++i) {
CHECK_EQ(ptrs_before[i], ptrs_after[i]);
}
}
TEST(GPUQuantile, MergeBasic) {
constexpr size_t kRows = 1000, kCols = 100;
RunWithSeedsAndBins(kRows, [=](int32_t seed, size_t n_bins, MetaInfo const &info) {
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_0(ft, n_bins, kCols, kRows, 0);
HostDeviceVector<float> storage_0;
std::string interface_str_0 = RandomDataGenerator{kRows, kCols, 0}
.Device(0)
.Seed(seed)
.GenerateArrayInterface(&storage_0);
data::CupyAdapter adapter_0(interface_str_0);
AdapterDeviceSketch(adapter_0.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(), &sketch_0);
SketchContainer sketch_1(ft, n_bins, kCols, kRows * kRows, 0);
HostDeviceVector<float> storage_1;
std::string interface_str_1 = RandomDataGenerator{kRows, kCols, 0}
.Device(0)
.Seed(seed)
.GenerateArrayInterface(&storage_1);
data::CupyAdapter adapter_1(interface_str_1);
AdapterDeviceSketch(adapter_1.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(), &sketch_1);
size_t size_before_merge = sketch_0.Data().size();
sketch_0.Merge(sketch_1.ColumnsPtr(), sketch_1.Data());
if (info.weights_.Size() != 0) {
TestQuantileElemRank(0, sketch_0.Data(), sketch_0.ColumnsPtr(), true);
sketch_0.FixError();
TestQuantileElemRank(0, sketch_0.Data(), sketch_0.ColumnsPtr(), false);
} else {
TestQuantileElemRank(0, sketch_0.Data(), sketch_0.ColumnsPtr());
}
auto columns_ptr = sketch_0.ColumnsPtr();
std::vector<bst_row_t> h_columns_ptr(columns_ptr.size());
dh::CopyDeviceSpanToVector(&h_columns_ptr, columns_ptr);
ASSERT_EQ(h_columns_ptr.back(), sketch_1.Data().size() + size_before_merge);
sketch_0.Unique();
ASSERT_TRUE(
thrust::is_sorted(thrust::device, sketch_0.Data().data(),
sketch_0.Data().data() + sketch_0.Data().size(),
detail::SketchUnique{}));
});
}
void TestMergeDuplicated(int32_t n_bins, size_t cols, size_t rows, float frac) {
MetaInfo info;
int32_t seed = 0;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_0(ft, n_bins, cols, rows, 0);
HostDeviceVector<float> storage_0;
std::string interface_str_0 = RandomDataGenerator{rows, cols, 0}
.Device(0)
.Seed(seed)
.GenerateArrayInterface(&storage_0);
data::CupyAdapter adapter_0(interface_str_0);
AdapterDeviceSketch(adapter_0.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(),
&sketch_0);
size_t f_rows = rows * frac;
SketchContainer sketch_1(ft, n_bins, cols, f_rows, 0);
HostDeviceVector<float> storage_1;
std::string interface_str_1 = RandomDataGenerator{f_rows, cols, 0}
.Device(0)
.Seed(seed)
.GenerateArrayInterface(&storage_1);
auto data_1 = storage_1.DeviceSpan();
auto tuple_it = thrust::make_tuple(
thrust::make_counting_iterator<size_t>(0ul), data_1.data());
using Tuple = thrust::tuple<size_t, float>;
auto it = thrust::make_zip_iterator(tuple_it);
thrust::transform(thrust::device, it, it + data_1.size(), data_1.data(),
[=] __device__(Tuple const &tuple) {
auto i = thrust::get<0>(tuple);
if (thrust::get<0>(tuple) % 2 == 0) {
return 0.0f;
} else {
return thrust::get<1>(tuple);
}
});
data::CupyAdapter adapter_1(interface_str_1);
AdapterDeviceSketch(adapter_1.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(),
&sketch_1);
size_t size_before_merge = sketch_0.Data().size();
sketch_0.Merge(sketch_1.ColumnsPtr(), sketch_1.Data());
TestQuantileElemRank(0, sketch_0.Data(), sketch_0.ColumnsPtr());
auto columns_ptr = sketch_0.ColumnsPtr();
std::vector<bst_row_t> h_columns_ptr(columns_ptr.size());
dh::CopyDeviceSpanToVector(&h_columns_ptr, columns_ptr);
ASSERT_EQ(h_columns_ptr.back(), sketch_1.Data().size() + size_before_merge);
sketch_0.Unique();
columns_ptr = sketch_0.ColumnsPtr();
dh::CopyDeviceSpanToVector(&h_columns_ptr, columns_ptr);
std::vector<SketchEntry> h_data(sketch_0.Data().size());
dh::CopyDeviceSpanToVector(&h_data, sketch_0.Data());
for (size_t i = 1; i < h_columns_ptr.size(); ++i) {
auto begin = h_columns_ptr[i - 1];
auto column = Span<SketchEntry> {h_data}.subspan(begin, h_columns_ptr[i] - begin);
ASSERT_TRUE(std::is_sorted(column.begin(), column.end(), IsSorted{}));
}
}
TEST(GPUQuantile, MergeDuplicated) {
size_t n_bins = 256;
constexpr size_t kRows = 1000, kCols = 100;
for (float frac = 0.5; frac < 2.5; frac += 0.5) {
TestMergeDuplicated(n_bins, kRows, kCols, frac);
}
}
TEST(GPUQuantile, MultiMerge) {
constexpr size_t kRows = 20, kCols = 1;
int32_t world = 2;
RunWithSeedsAndBins(kRows, [=](int32_t seed, size_t n_bins,
MetaInfo const &info) {
// Set up single node version
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_on_single_node(ft, n_bins, kCols, kRows, 0);
size_t intermediate_num_cuts = std::min(
kRows * world, static_cast<size_t>(n_bins * WQSketch::kFactor));
std::vector<SketchContainer> containers;
for (auto rank = 0; rank < world; ++rank) {
HostDeviceVector<float> storage;
std::string interface_str = RandomDataGenerator{kRows, kCols, 0}
.Device(0)
.Seed(rank + seed)
.GenerateArrayInterface(&storage);
data::CupyAdapter adapter(interface_str);
HostDeviceVector<FeatureType> ft;
containers.emplace_back(ft, n_bins, kCols, kRows, 0);
AdapterDeviceSketch(adapter.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(),
&containers.back());
}
for (auto &sketch : containers) {
sketch.Prune(intermediate_num_cuts);
sketch_on_single_node.Merge(sketch.ColumnsPtr(), sketch.Data());
sketch_on_single_node.FixError();
}
TestQuantileElemRank(0, sketch_on_single_node.Data(),
sketch_on_single_node.ColumnsPtr());
sketch_on_single_node.Unique();
TestQuantileElemRank(0, sketch_on_single_node.Data(),
sketch_on_single_node.ColumnsPtr());
});
}
TEST(GPUQuantile, AllReduceBasic) {
// This test is supposed to be run by a Python test that sets up the environment.
std::string msg {"Skipping AllReduce test"};
auto n_gpus = AllVisibleGPUs();
InitRabitContext(msg, n_gpus);
auto world = rabit::GetWorldSize();
if (world != 1) {
ASSERT_EQ(world, n_gpus);
} else {
return;
}
constexpr size_t kRows = 1000, kCols = 100;
RunWithSeedsAndBins(kRows, [=](int32_t seed, size_t n_bins, MetaInfo const& info) {
// Set up single node version;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_on_single_node(ft, n_bins, kCols, kRows, 0);
size_t intermediate_num_cuts = std::min(
kRows * world, static_cast<size_t>(n_bins * WQSketch::kFactor));
std::vector<SketchContainer> containers;
for (auto rank = 0; rank < world; ++rank) {
HostDeviceVector<float> storage;
std::string interface_str = RandomDataGenerator{kRows, kCols, 0}
.Device(0)
.Seed(rank + seed)
.GenerateArrayInterface(&storage);
data::CupyAdapter adapter(interface_str);
HostDeviceVector<FeatureType> ft;
containers.emplace_back(ft, n_bins, kCols, kRows, 0);
AdapterDeviceSketch(adapter.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(),
&containers.back());
}
for (auto &sketch : containers) {
sketch.Prune(intermediate_num_cuts);
sketch_on_single_node.Merge(sketch.ColumnsPtr(), sketch.Data());
sketch_on_single_node.FixError();
}
sketch_on_single_node.Unique();
TestQuantileElemRank(0, sketch_on_single_node.Data(),
sketch_on_single_node.ColumnsPtr());
// Set up distributed version. We rely on using rank as seed to generate
// the exact same copy of data.
auto rank = rabit::GetRank();
SketchContainer sketch_distributed(ft, n_bins, kCols, kRows, 0);
HostDeviceVector<float> storage;
std::string interface_str = RandomDataGenerator{kRows, kCols, 0}
.Device(0)
.Seed(rank + seed)
.GenerateArrayInterface(&storage);
data::CupyAdapter adapter(interface_str);
AdapterDeviceSketch(adapter.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(),
&sketch_distributed);
sketch_distributed.AllReduce();
sketch_distributed.Unique();
ASSERT_EQ(sketch_distributed.ColumnsPtr().size(),
sketch_on_single_node.ColumnsPtr().size());
ASSERT_EQ(sketch_distributed.Data().size(),
sketch_on_single_node.Data().size());
TestQuantileElemRank(0, sketch_distributed.Data(),
sketch_distributed.ColumnsPtr());
std::vector<SketchEntry> single_node_data(
sketch_on_single_node.Data().size());
dh::CopyDeviceSpanToVector(&single_node_data, sketch_on_single_node.Data());
std::vector<SketchEntry> distributed_data(sketch_distributed.Data().size());
dh::CopyDeviceSpanToVector(&distributed_data, sketch_distributed.Data());
float Eps = 2e-4 * world;
for (size_t i = 0; i < single_node_data.size(); ++i) {
ASSERT_NEAR(single_node_data[i].value, distributed_data[i].value, Eps);
ASSERT_NEAR(single_node_data[i].rmax, distributed_data[i].rmax, Eps);
ASSERT_NEAR(single_node_data[i].rmin, distributed_data[i].rmin, Eps);
ASSERT_NEAR(single_node_data[i].wmin, distributed_data[i].wmin, Eps);
}
});
rabit::Finalize();
}
TEST(GPUQuantile, SameOnAllWorkers) {
std::string msg {"Skipping SameOnAllWorkers test"};
auto n_gpus = AllVisibleGPUs();
InitRabitContext(msg, n_gpus);
auto world = rabit::GetWorldSize();
if (world != 1) {
ASSERT_EQ(world, n_gpus);
} else {
return;
}
constexpr size_t kRows = 1000, kCols = 100;
RunWithSeedsAndBins(kRows, [=](int32_t seed, size_t n_bins,
MetaInfo const &info) {
auto rank = rabit::GetRank();
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_distributed(ft, n_bins, kCols, kRows, 0);
HostDeviceVector<float> storage;
std::string interface_str = RandomDataGenerator{kRows, kCols, 0}
.Device(0)
.Seed(rank + seed)
.GenerateArrayInterface(&storage);
data::CupyAdapter adapter(interface_str);
AdapterDeviceSketch(adapter.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(),
&sketch_distributed);
sketch_distributed.AllReduce();
sketch_distributed.Unique();
TestQuantileElemRank(0, sketch_distributed.Data(), sketch_distributed.ColumnsPtr());
// Test for all workers having the same sketch.
size_t n_data = sketch_distributed.Data().size();
rabit::Allreduce<rabit::op::Max>(&n_data, 1);
ASSERT_EQ(n_data, sketch_distributed.Data().size());
size_t size_as_float =
sketch_distributed.Data().size_bytes() / sizeof(float);
auto local_data = Span<float const>{
reinterpret_cast<float const *>(sketch_distributed.Data().data()),
size_as_float};
dh::caching_device_vector<float> all_workers(size_as_float * world);
thrust::fill(all_workers.begin(), all_workers.end(), 0);
thrust::copy(thrust::device, local_data.data(),
local_data.data() + local_data.size(),
all_workers.begin() + local_data.size() * rank);
dh::AllReducer reducer;
reducer.Init(0);
reducer.AllReduceSum(all_workers.data().get(), all_workers.data().get(),
all_workers.size());
reducer.Synchronize();
auto base_line = dh::ToSpan(all_workers).subspan(0, size_as_float);
std::vector<float> h_base_line(base_line.size());
dh::CopyDeviceSpanToVector(&h_base_line, base_line);
size_t offset = 0;
for (decltype(world) i = 0; i < world; ++i) {
auto comp = dh::ToSpan(all_workers).subspan(offset, size_as_float);
std::vector<float> h_comp(comp.size());
dh::CopyDeviceSpanToVector(&h_comp, comp);
ASSERT_EQ(comp.size(), base_line.size());
for (size_t j = 0; j < h_comp.size(); ++j) {
ASSERT_NEAR(h_base_line[j], h_comp[j], kRtEps);
}
offset += size_as_float;
}
});
}
TEST(GPUQuantile, Push) {
size_t constexpr kRows = 100;
std::vector<float> data(kRows);
std::fill(data.begin(), data.begin() + (data.size() / 2), 0.3f);
std::fill(data.begin() + (data.size() / 2), data.end(), 0.5f);
int32_t n_bins = 128;
bst_feature_t constexpr kCols = 1;
std::vector<Entry> entries(kRows);
for (bst_feature_t i = 0; i < entries.size(); ++i) {
Entry e{i, data[i]};
entries[i] = e;
}
dh::device_vector<Entry> d_entries(entries);
dh::device_vector<size_t> columns_ptr(2);
columns_ptr[0] = 0;
columns_ptr[1] = kRows;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch(ft, n_bins, kCols, kRows, 0);
sketch.Push(dh::ToSpan(d_entries), dh::ToSpan(columns_ptr), dh::ToSpan(columns_ptr), kRows, {});
auto sketch_data = sketch.Data();
thrust::host_vector<SketchEntry> h_sketch_data(sketch_data.size());
auto ptr = thrust::device_ptr<SketchEntry const>(sketch_data.data());
thrust::copy(ptr, ptr + sketch_data.size(), h_sketch_data.begin());
ASSERT_EQ(h_sketch_data.size(), 2);
auto v_0 = h_sketch_data[0];
ASSERT_EQ(v_0.rmin, 0);
ASSERT_EQ(v_0.wmin, kRows / 2.0f);
ASSERT_EQ(v_0.rmax, kRows / 2.0f);
auto v_1 = h_sketch_data[1];
ASSERT_EQ(v_1.rmin, kRows / 2.0f);
ASSERT_EQ(v_1.wmin, kRows / 2.0f);
ASSERT_EQ(v_1.rmax, static_cast<float>(kRows));
}
TEST(GPUQuantile, MultiColPush) {
size_t constexpr kRows = 100, kCols = 4;
std::vector<float> data(kRows * kCols);
std::fill(data.begin(), data.begin() + (data.size() / 2), 0.3f);
std::vector<Entry> entries(kRows * kCols);
for (bst_feature_t c = 0; c < kCols; ++c) {
for (size_t r = 0; r < kRows; ++r) {
float v = (r >= kRows / 2) ? 0.7 : 0.4;
auto e = Entry{c, v};
entries[c * kRows + r] = e;
}
}
int32_t n_bins = 16;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch(ft, n_bins, kCols, kRows, 0);
dh::device_vector<Entry> d_entries {entries};
dh::device_vector<size_t> columns_ptr(kCols + 1, 0);
for (size_t i = 1; i < kCols + 1; ++i) {
columns_ptr[i] = kRows;
}
thrust::inclusive_scan(thrust::device, columns_ptr.begin(), columns_ptr.end(),
columns_ptr.begin());
dh::device_vector<size_t> cuts_ptr(columns_ptr);
sketch.Push(dh::ToSpan(d_entries), dh::ToSpan(columns_ptr),
dh::ToSpan(cuts_ptr), kRows * kCols, {});
auto sketch_data = sketch.Data();
ASSERT_EQ(sketch_data.size(), kCols * 2);
auto ptr = thrust::device_ptr<SketchEntry const>(sketch_data.data());
std::vector<SketchEntry> h_sketch_data(sketch_data.size());
thrust::copy(ptr, ptr + sketch_data.size(), h_sketch_data.begin());
for (size_t i = 0; i < kCols; ++i) {
auto v_0 = h_sketch_data[i * 2];
ASSERT_EQ(v_0.rmin, 0);
ASSERT_EQ(v_0.wmin, kRows / 2.0f);
ASSERT_EQ(v_0.rmax, kRows / 2.0f);
auto v_1 = h_sketch_data[i * 2 + 1];
ASSERT_EQ(v_1.rmin, kRows / 2.0f);
ASSERT_EQ(v_1.wmin, kRows / 2.0f);
ASSERT_EQ(v_1.rmax, static_cast<float>(kRows));
}
}
} // namespace common
} // namespace xgboost
|
7bd656a2ec32ec0eec3b9514820c7470bcf79edc.hip | // !!! This is a file automatically generated by hipify!!!
/*
% Function: decompose_subframe
% Description: decompose the subframe by demultiplexing the dmrs signals and the data
% Inputs: *subframe_h the subframe with data of all ofdm symbols
% M_pusch_rb number of resource blocks assigned to the ue
% Outputs: *complex_data_h: complex data carried in the subframe
% *dmrs_1_h: demodulation reference signal number 1
% *dmrs_2_h: demodulation reference signal number 2
By: Ahmad Nour & Mohammed Mostafa
*/
#include "decompose_subframe.cuh"
int main(int argc, char **argv) {
const int M_pusch_rb = 100;
const int M_pusch_sc = N_sc_rb * M_pusch_rb;
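// Assuming the usual LTE value N_sc_rb = 12 subcarriers per resource block, M_pusch_sc works out to 1200 subcarriers per symbol here.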
//input
hipfftComplex* subframe_h = (hipfftComplex *)malloc(sizeof(hipfftComplex) * N_symbs_per_subframe * M_pusch_sc);
for (int i = 0; i < N_symbs_per_subframe * M_pusch_sc; i++)
{
subframe_h[i].x = rand() / (float)RAND_MAX * 10;
subframe_h[i].y = rand() / (float)RAND_MAX * 10;
}
//For output
hipfftComplex *complex_data_h;
hipfftComplex *dmrs_1_h;
hipfftComplex *dmrs_2_h;
//Call the decompose_subframe function
decompose_subframe(subframe_h, M_pusch_rb, &complex_data_h, &dmrs_1_h, &dmrs_2_h);
//Print results
for (int i = 0; i < 12*M_pusch_sc; i++)
{
printf("idx = %d \t %f \t %f \n", i + 1, complex_data_h[i].x, complex_data_h[i].y);
}
//input file
FILE *results;
if ((results = freopen("decompose_subframe_input.m", "w+", stdout)) == NULL) {
printf("Cannot open file.\n");
exit(1);
}
printf("clear; clc;\nsymbols_real = [ ");
for (int i = 0; i < (N_symbs_per_subframe*M_pusch_sc); i++)
{
printf("%10f", subframe_h[i].x);
if (i != ((N_symbs_per_subframe*M_pusch_sc) - 1))
printf(",");
}
printf(" ];\nsymbols_imag = [ ");
for (int i = 0; i < (N_symbs_per_subframe*M_pusch_sc); i++)
{
printf("%10f", subframe_h[i].y);
if (i != ((N_symbs_per_subframe*M_pusch_sc) - 1))
printf(",");
}
printf(" ];\n");
printf("subframe_CUDA = symbols_real + 1i * symbols_imag;\n");
fclose(results);
//output file
FILE *results1;
if ((results1 = freopen("decompose_subframe_Results_.m", "w+", stdout)) == NULL) {
printf("Cannot open file.\n");
exit(1);
}
//complex_data
printf("clear; clc;\ncomplex_data_real = [ ");
for (int i = 0; i < (12 * M_pusch_sc); i++)
{
printf("%10f", complex_data_h[i].x);
if (i != ((12 * M_pusch_sc) - 1))
printf(",");
}
printf(" ];\ncomplex_data_imag = [ ");
for (int i = 0; i < (12 * M_pusch_sc); i++)
{
printf("%10f", complex_data_h[i].y);
if (i != ((12 * M_pusch_sc) - 1))
printf(",");
}
printf(" ];\n");
printf("complex_data_CUDA = complex_data_real + 1i * complex_data_imag;\n");
//dmrs_1
printf("dmrs1_real = [ ");
for (int i = 0; i < (M_pusch_sc); i++)
{
printf("%10f", dmrs_1_h[i].x);
if (i != ((M_pusch_sc)-1))
printf(",");
}
printf(" ];\ndmrs1_imag = [ ");
for (int i = 0; i < (M_pusch_sc); i++)
{
printf("%10f", dmrs_1_h[i].y);
if (i != ((M_pusch_sc)-1))
printf(",");
}
printf(" ];\n");
printf("dmrs1_CUDA = dmrs1_real + 1i * dmrs1_imag;\n");
//dmrs2
printf("dmrs2_real = [ ");
for (int i = 0; i < (M_pusch_sc); i++)
{
printf("%10f", dmrs_2_h[i].x);
if (i != ((M_pusch_sc)-1))
printf(",");
}
printf(" ];\ndmrs2_imag = [ ");
for (int i = 0; i < (M_pusch_sc); i++)
{
printf("%10f", dmrs_2_h[i].y);
if (i != ((M_pusch_sc)-1))
printf(",");
}
printf(" ];\n");
printf("dmrs2_CUDA = dmrs2_real + 1i * dmrs2_imag;\n");
//close output file
fclose(results1);
return 0;
} | 7bd656a2ec32ec0eec3b9514820c7470bcf79edc.cu | /*
% Function: decompose_subframe
% Description: decompose the subframe by demultiplexing the dmrs signals and the data
% Inputs: *subframe_h the subframe with data of all ofdm symbols
% M_pusch_rb number of resource blocks assigned to the ue
% Outputs: *complex_data_h: complex data carried in the subframe
% *dmrs_1_h: demodulation reference signal number 1
% *dmrs_2_h: demodulation reference signal number 2
By: Ahmad Nour & Mohammed Mostafa
*/
#include "decompose_subframe.cuh"
int main(int argc, char **argv) {
const int M_pusch_rb = 100;
const int M_pusch_sc = N_sc_rb * M_pusch_rb;
//input
cufftComplex* subframe_h = (cufftComplex *)malloc(sizeof(cufftComplex) * N_symbs_per_subframe * M_pusch_sc);
for (int i = 0; i < N_symbs_per_subframe * M_pusch_sc; i++)
{
subframe_h[i].x = rand() / (float)RAND_MAX * 10;
subframe_h[i].y = rand() / (float)RAND_MAX * 10;
}
//For output
cufftComplex *complex_data_h;
cufftComplex *dmrs_1_h;
cufftComplex *dmrs_2_h;
//Call the decompose_subframe function
decompose_subframe(subframe_h, M_pusch_rb, &complex_data_h, &dmrs_1_h, &dmrs_2_h);
//Print results
for (int i = 0; i < 12*M_pusch_sc; i++)
{
printf("idx = %d \t %f \t %f \n", i + 1, complex_data_h[i].x, complex_data_h[i].y);
}
//input file
FILE *results;
if ((results = freopen("decompose_subframe_input.m", "w+", stdout)) == NULL) {
printf("Cannot open file.\n");
exit(1);
}
printf("clear; clc;\nsymbols_real = [ ");
for (int i = 0; i < (N_symbs_per_subframe*M_pusch_sc); i++)
{
printf("%10f", subframe_h[i].x);
if (i != ((N_symbs_per_subframe*M_pusch_sc) - 1))
printf(",");
}
printf(" ];\nsymbols_imag = [ ");
for (int i = 0; i < (N_symbs_per_subframe*M_pusch_sc); i++)
{
printf("%10f", subframe_h[i].y);
if (i != ((N_symbs_per_subframe*M_pusch_sc) - 1))
printf(",");
}
printf(" ];\n");
printf("subframe_CUDA = symbols_real + 1i * symbols_imag;\n");
fclose(results);
//output file
FILE *results1;
if ((results1 = freopen("decompose_subframe_Results_.m", "w+", stdout)) == NULL) {
printf("Cannot open file.\n");
exit(1);
}
//complex_data
printf("clear; clc;\ncomplex_data_real = [ ");
for (int i = 0; i < (12 * M_pusch_sc); i++)
{
printf("%10f", complex_data_h[i].x);
if (i != ((12 * M_pusch_sc) - 1))
printf(",");
}
printf(" ];\ncomplex_data_imag = [ ");
for (int i = 0; i < (12 * M_pusch_sc); i++)
{
printf("%10f", complex_data_h[i].y);
if (i != ((12 * M_pusch_sc) - 1))
printf(",");
}
printf(" ];\n");
printf("complex_data_CUDA = complex_data_real + 1i * complex_data_imag;\n");
//dmrs_1
printf("dmrs1_real = [ ");
for (int i = 0; i < (M_pusch_sc); i++)
{
printf("%10f", dmrs_1_h[i].x);
if (i != ((M_pusch_sc)-1))
printf(",");
}
printf(" ];\ndmrs1_imag = [ ");
for (int i = 0; i < (M_pusch_sc); i++)
{
printf("%10f", dmrs_1_h[i].y);
if (i != ((M_pusch_sc)-1))
printf(",");
}
printf(" ];\n");
printf("dmrs1_CUDA = dmrs1_real + 1i * dmrs1_imag;\n");
//dmrs2
printf("dmrs2_real = [ ");
for (int i = 0; i < (M_pusch_sc); i++)
{
printf("%10f", dmrs_2_h[i].x);
if (i != ((M_pusch_sc)-1))
printf(",");
}
printf(" ];\ndmrs2_imag = [ ");
for (int i = 0; i < (M_pusch_sc); i++)
{
printf("%10f", dmrs_2_h[i].y);
if (i != ((M_pusch_sc)-1))
printf(",");
}
printf(" ];\n");
printf("dmrs2_CUDA = dmrs2_real + 1i * dmrs2_imag;\n");
//close output file
fclose(results1);
return 0;
} |
ab52085a6c0e63e2229ac3e7462d7f56cfbe0dc5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHTensorMath.h"
#include "THHGeneral.h"
#include "THHAtomics.cuh"
#include "THHApply.cuh"
// Compute the offsets into the given tensors for a linear index. For the 't2'
// tensor, dimension 'dim' is skipped. The tensors are assumed to have the same
// size (with the exception of 't2' in dimension 'dim').
// This version uses a static number of dimensions.
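// For example, for a 3-d tensor with dim == 1, gather computes tensor[i][j][k] = src[i][index[i][j][k]][k].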
template <typename IndexType, typename Real, int Dims>
struct IndexToScatterGatherOffsets {
static __device__ void compute(
IndexType linearId, const int dim,
const TensorInfo<long, IndexType>& index, IndexType* indexOffset,
const TensorInfo<Real, IndexType>& t1, IndexType* t1Offset,
const TensorInfo<Real, IndexType>& t2, IndexType* t2Offset) {
for (int d = Dims - 1; d >= 0; d--) {
IndexType curDimIndex = linearId % index.sizes[d];
*indexOffset += curDimIndex * index.strides[d];
*t1Offset += curDimIndex * t1.strides[d];
if (d != dim) {
*t2Offset += curDimIndex * t2.strides[d];
}
linearId /= index.sizes[d];
}
}
static __device__ void compute(
IndexType linearId, const int dim,
const TensorInfo<long, IndexType>& index, IndexType* indexOffset,
const TensorInfo<Real, IndexType>& t2, IndexType* t2Offset) {
for (int d = Dims - 1; d >= 0; d--) {
IndexType curDimIndex = linearId % index.sizes[d];
*indexOffset += curDimIndex * index.strides[d];
if (d != dim) {
*t2Offset += curDimIndex * t2.strides[d];
}
linearId /= index.sizes[d];
}
}
};
// Same as above but using a dynamic number of dimensions.
template <typename IndexType, typename Real>
struct IndexToScatterGatherOffsets<IndexType, Real, -1> {
static __device__ void compute(
IndexType linearId, const int dim,
const TensorInfo<long, IndexType>& index, IndexType* indexOffset,
const TensorInfo<Real, IndexType>& t1, IndexType* t1Offset,
const TensorInfo<Real, IndexType>& t2, IndexType* t2Offset) {
for (int d = index.dims - 1; d >= 0; d--) {
IndexType curDimIndex = linearId % index.sizes[d];
*indexOffset += curDimIndex * index.strides[d];
*t1Offset += curDimIndex * t1.strides[d];
if (d != dim) {
*t2Offset += curDimIndex * t2.strides[d];
}
linearId /= index.sizes[d];
}
}
static __device__ void compute(
IndexType linearId, const int dim,
const TensorInfo<long, IndexType>& index, IndexType* indexOffset,
const TensorInfo<Real, IndexType>& t2, IndexType* t2Offset) {
for (int d = index.dims - 1; d >= 0; d--) {
IndexType curDimIndex = linearId % index.sizes[d];
*indexOffset += curDimIndex * index.strides[d];
if (d != dim) {
*t2Offset += curDimIndex * t2.strides[d];
}
linearId /= index.sizes[d];
}
}
};
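// The kernels below walk the output elements with a grid-stride loop and use the offset helpers above to map a flat element id to per-tensor offsets.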
template <typename IndexType, typename Real, int Dims>
__global__ void THCudaTensor_gatherKernel(
TensorInfo<Real, IndexType> tensor,
TensorInfo<Real, IndexType> src,
TensorInfo<long, IndexType> index,
const int dim,
const IndexType totalElements) {
for (IndexType linearId = hipBlockIdx_x * hipBlockDim_x + hipThreadIdx_x;
linearId < totalElements;
linearId += hipGridDim_x * hipBlockDim_x) {
IndexType tensorOffset = 0;
IndexType srcOffset = 0;
IndexType indexOffset = 0;
IndexToScatterGatherOffsets<IndexType, Real, Dims>::compute(linearId, dim,
index, &indexOffset,
tensor, &tensorOffset,
src, &srcOffset);
long indexValue = index.data[indexOffset] - TH_INDEX_BASE;
assert(indexValue >= 0 && indexValue < src.sizes[dim]);
srcOffset += indexValue * src.strides[dim];
tensor.data[tensorOffset] = src.data[srcOffset];
}
}
template <typename IndexType, typename Real, int Dims>
__global__ void THCudaTensor_scatterKernel(
TensorInfo<Real, IndexType> tensor,
TensorInfo<Real, IndexType> src,
TensorInfo<long, IndexType> index,
const int dim,
const IndexType totalElements) {
for (IndexType linearId = hipBlockIdx_x * hipBlockDim_x + hipThreadIdx_x;
linearId < totalElements;
linearId += hipGridDim_x * hipBlockDim_x) {
IndexType tensorOffset = 0;
IndexType srcOffset = 0;
IndexType indexOffset = 0;
IndexToScatterGatherOffsets<IndexType, Real, Dims>::compute(linearId, dim,
index, &indexOffset,
src, &srcOffset,
tensor, &tensorOffset);
long indexValue = index.data[indexOffset] - TH_INDEX_BASE;
assert(indexValue >= 0 && indexValue < tensor.sizes[dim]);
tensorOffset += indexValue * tensor.strides[dim];
tensor.data[tensorOffset] = src.data[srcOffset];
}
}
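// Same addressing as scatter, but contributions are summed with atomicAdd, so duplicate indices along 'dim' accumulate instead of overwriting each other.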
template <typename IndexType, typename Real, int Dims>
__global__ void THCudaTensor_scatterAddKernel(
TensorInfo<Real, IndexType> tensor,
TensorInfo<Real, IndexType> src,
TensorInfo<long, IndexType> index,
const int dim,
const IndexType totalElements) {
for (IndexType linearId = hipBlockIdx_x * hipBlockDim_x + hipThreadIdx_x;
linearId < totalElements;
linearId += hipGridDim_x * hipBlockDim_x) {
IndexType tensorOffset = 0;
IndexType srcOffset = 0;
IndexType indexOffset = 0;
IndexToScatterGatherOffsets<IndexType, Real, Dims>::compute(linearId, dim,
index, &indexOffset,
src, &srcOffset,
tensor, &tensorOffset);
long indexValue = index.data[indexOffset] - TH_INDEX_BASE;
assert(indexValue >= 0 && indexValue < tensor.sizes[dim]);
tensorOffset += indexValue * tensor.strides[dim];
atomicAdd(&tensor.data[tensorOffset], src.data[srcOffset]);
}
}
template <typename IndexType, typename Real, int Dims>
__global__ void THCudaTensor_scatterFillKernel(
TensorInfo<Real, IndexType> tensor,
TensorInfo<long, IndexType> index,
Real value,
const int dim,
const IndexType totalElements) {
for (IndexType linearId = hipBlockIdx_x * hipBlockDim_x + hipThreadIdx_x;
linearId < totalElements;
linearId += hipGridDim_x * hipBlockDim_x) {
IndexType tensorOffset = 0;
IndexType indexOffset = 0;
IndexToScatterGatherOffsets<IndexType, Real, Dims>::compute(linearId, dim,
index, &indexOffset,
tensor, &tensorOffset);
long indexValue = index.data[indexOffset] - TH_INDEX_BASE;
assert(indexValue >= 0 && indexValue < tensor.sizes[dim]);
tensorOffset += indexValue * tensor.strides[dim];
tensor.data[tensorOffset] = value;
}
}
#include "generic/THCTensorScatterGather.cu"
#include "THHGenerateAllTypes.h"
| ab52085a6c0e63e2229ac3e7462d7f56cfbe0dc5.cu | #include "hip/hip_runtime.h"
#include "THCTensorMath.h"
#include "THCGeneral.h"
#include "THCAtomics.cuh"
#include "THCApply.cuh"
// Compute the offsets into the given tensors for a linear index. For the 't2'
// tensor, dimension 'dim' is skipped. The tensors are assumed to have the same
// size (with the exception of 't2' in dimension 'dim').
// This version uses a static number of dimensions.
template <typename IndexType, typename Real, int Dims>
struct IndexToScatterGatherOffsets {
static __device__ void compute(
IndexType linearId, const int dim,
const TensorInfo<long, IndexType>& index, IndexType* indexOffset,
const TensorInfo<Real, IndexType>& t1, IndexType* t1Offset,
const TensorInfo<Real, IndexType>& t2, IndexType* t2Offset) {
for (int d = Dims - 1; d >= 0; d--) {
IndexType curDimIndex = linearId % index.sizes[d];
*indexOffset += curDimIndex * index.strides[d];
*t1Offset += curDimIndex * t1.strides[d];
if (d != dim) {
*t2Offset += curDimIndex * t2.strides[d];
}
linearId /= index.sizes[d];
}
}
static __device__ void compute(
IndexType linearId, const int dim,
const TensorInfo<long, IndexType>& index, IndexType* indexOffset,
const TensorInfo<Real, IndexType>& t2, IndexType* t2Offset) {
for (int d = Dims - 1; d >= 0; d--) {
IndexType curDimIndex = linearId % index.sizes[d];
*indexOffset += curDimIndex * index.strides[d];
if (d != dim) {
*t2Offset += curDimIndex * t2.strides[d];
}
linearId /= index.sizes[d];
}
}
};
// Same as above but using a dynamic number of dimensions.
template <typename IndexType, typename Real>
struct IndexToScatterGatherOffsets<IndexType, Real, -1> {
static __device__ void compute(
IndexType linearId, const int dim,
const TensorInfo<long, IndexType>& index, IndexType* indexOffset,
const TensorInfo<Real, IndexType>& t1, IndexType* t1Offset,
const TensorInfo<Real, IndexType>& t2, IndexType* t2Offset) {
for (int d = index.dims - 1; d >= 0; d--) {
IndexType curDimIndex = linearId % index.sizes[d];
*indexOffset += curDimIndex * index.strides[d];
*t1Offset += curDimIndex * t1.strides[d];
if (d != dim) {
*t2Offset += curDimIndex * t2.strides[d];
}
linearId /= index.sizes[d];
}
}
static __device__ void compute(
IndexType linearId, const int dim,
const TensorInfo<long, IndexType>& index, IndexType* indexOffset,
const TensorInfo<Real, IndexType>& t2, IndexType* t2Offset) {
for (int d = index.dims - 1; d >= 0; d--) {
IndexType curDimIndex = linearId % index.sizes[d];
*indexOffset += curDimIndex * index.strides[d];
if (d != dim) {
*t2Offset += curDimIndex * t2.strides[d];
}
linearId /= index.sizes[d];
}
}
};
template <typename IndexType, typename Real, int Dims>
__global__ void THCudaTensor_gatherKernel(
TensorInfo<Real, IndexType> tensor,
TensorInfo<Real, IndexType> src,
TensorInfo<long, IndexType> index,
const int dim,
const IndexType totalElements) {
for (IndexType linearId = hipBlockIdx_x * hipBlockDim_x + hipThreadIdx_x;
linearId < totalElements;
linearId += hipGridDim_x * hipBlockDim_x) {
IndexType tensorOffset = 0;
IndexType srcOffset = 0;
IndexType indexOffset = 0;
IndexToScatterGatherOffsets<IndexType, Real, Dims>::compute(linearId, dim,
index, &indexOffset,
tensor, &tensorOffset,
src, &srcOffset);
long indexValue = index.data[indexOffset] - TH_INDEX_BASE;
assert(indexValue >= 0 && indexValue < src.sizes[dim]);
srcOffset += indexValue * src.strides[dim];
tensor.data[tensorOffset] = src.data[srcOffset];
}
}
template <typename IndexType, typename Real, int Dims>
__global__ void THCudaTensor_scatterKernel(
TensorInfo<Real, IndexType> tensor,
TensorInfo<Real, IndexType> src,
TensorInfo<long, IndexType> index,
const int dim,
const IndexType totalElements) {
for (IndexType linearId = hipBlockIdx_x * hipBlockDim_x + hipThreadIdx_x;
linearId < totalElements;
linearId += hipGridDim_x * hipBlockDim_x) {
IndexType tensorOffset = 0;
IndexType srcOffset = 0;
IndexType indexOffset = 0;
IndexToScatterGatherOffsets<IndexType, Real, Dims>::compute(linearId, dim,
index, &indexOffset,
src, &srcOffset,
tensor, &tensorOffset);
long indexValue = index.data[indexOffset] - TH_INDEX_BASE;
assert(indexValue >= 0 && indexValue < tensor.sizes[dim]);
tensorOffset += indexValue * tensor.strides[dim];
tensor.data[tensorOffset] = src.data[srcOffset];
}
}
template <typename IndexType, typename Real, int Dims>
__global__ void THCudaTensor_scatterAddKernel(
TensorInfo<Real, IndexType> tensor,
TensorInfo<Real, IndexType> src,
TensorInfo<long, IndexType> index,
const int dim,
const IndexType totalElements) {
for (IndexType linearId = hipBlockIdx_x * hipBlockDim_x + hipThreadIdx_x;
linearId < totalElements;
linearId += hipGridDim_x * hipBlockDim_x) {
IndexType tensorOffset = 0;
IndexType srcOffset = 0;
IndexType indexOffset = 0;
IndexToScatterGatherOffsets<IndexType, Real, Dims>::compute(linearId, dim,
index, &indexOffset,
src, &srcOffset,
tensor, &tensorOffset);
long indexValue = index.data[indexOffset] - TH_INDEX_BASE;
assert(indexValue >= 0 && indexValue < tensor.sizes[dim]);
tensorOffset += indexValue * tensor.strides[dim];
atomicAdd(&tensor.data[tensorOffset], src.data[srcOffset]);
}
}
template <typename IndexType, typename Real, int Dims>
__global__ void THCudaTensor_scatterFillKernel(
TensorInfo<Real, IndexType> tensor,
TensorInfo<long, IndexType> index,
Real value,
const int dim,
const IndexType totalElements) {
for (IndexType linearId = hipBlockIdx_x * hipBlockDim_x + hipThreadIdx_x;
linearId < totalElements;
linearId += hipGridDim_x * hipBlockDim_x) {
IndexType tensorOffset = 0;
IndexType indexOffset = 0;
IndexToScatterGatherOffsets<IndexType, Real, Dims>::compute(linearId, dim,
index, &indexOffset,
tensor, &tensorOffset);
long indexValue = index.data[indexOffset] - TH_INDEX_BASE;
assert(indexValue >= 0 && indexValue < tensor.sizes[dim]);
tensorOffset += indexValue * tensor.strides[dim];
tensor.data[tensorOffset] = value;
}
}
#include "generic/THCTensorScatterGather.cu"
#include "THCGenerateAllTypes.h"
|
40a9f26fab85f75443aca99f5b086465762005a8.hip | // !!! This is a file automatically generated by hipify!!!
/*
* atomic.cu
*
* Created on: Oct 17, 2020
* Author: ahmadsv
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include "gputimer.h"
#define NUM_THREADS 1000000
#define ARRAY_SIZE 100
#define BLOCK_WIDTH 1000
void print_array(int *array, int size)
{
printf("{ ");
for (int i = 0; i < size; ++i)
{
printf("%d ", array[i]);
}
printf("} \n");
}
__global__ void increment_naive(int *g)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
i = i % ARRAY_SIZE;
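// unsynchronized read-modify-write: many threads map to the same array element, so concurrent updates can overwrite each other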
g[i] = g[i] + 1;
}
__global__ void increment_atomic(int *g)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
i = i % ARRAY_SIZE;
atomicAdd(&g[i],1);
}
int main (int argc, char **argv)
{
GpuTimer timer;
printf("%d total threads in %d blocks writing into %d array elements\n",
NUM_THREADS,NUM_THREADS / BLOCK_WIDTH, ARRAY_SIZE);
int h_array[ARRAY_SIZE];
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
int *d_array;
hipMalloc((void **)&d_array, ARRAY_BYTES);
hipMemset((void *) d_array, 0 , ARRAY_BYTES);
timer.Start();
// increment_naive<<<NUM_THREADS/BLOCK_WIDTH, BLOCK_WIDTH>>>(d_array);
hipLaunchKernelGGL(( increment_atomic), dim3(NUM_THREADS/BLOCK_WIDTH), dim3(BLOCK_WIDTH), 0, 0, d_array);
timer.Stop();
hipMemcpy(h_array,d_array,ARRAY_BYTES, hipMemcpyDeviceToHost);
//print_array(h_array, ARRAY_SIZE);
printf("Timer elapsed = %g ms\n", timer.Elapsed());
hipFree(d_array);
return 0;
}
| 40a9f26fab85f75443aca99f5b086465762005a8.cu | /*
* atomic.cu
*
* Created on: Oct 17, 2020
* Author: ahmadsv
*/
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include "gputimer.h"
#define NUM_THREADS 1000000
#define ARRAY_SIZE 100
#define BLOCK_WIDTH 1000
void print_array(int *array, int size)
{
printf("{ ");
for (int i = 0; i < size; ++i)
{
printf("%d ", array[i]);
}
printf("} \n");
}
__global__ void increment_naive(int *g)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
i = i % ARRAY_SIZE;
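// unsynchronized read-modify-write: many threads map to the same array element, so concurrent updates can overwrite each other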
g[i] = g[i] + 1;
}
__global__ void increment_atomic(int *g)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
i = i % ARRAY_SIZE;
atomicAdd(&g[i],1);
}
int main (int argc, char **argv)
{
GpuTimer timer;
printf("%d total threads in %d blocks writing into %d array elements\n",
NUM_THREADS,NUM_THREADS / BLOCK_WIDTH, ARRAY_SIZE);
int h_array[ARRAY_SIZE];
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
int *d_array;
cudaMalloc((void **)&d_array, ARRAY_BYTES);
cudaMemset((void *) d_array, 0 , ARRAY_BYTES);
timer.Start();
// increment_naive<<<NUM_THREADS/BLOCK_WIDTH, BLOCK_WIDTH>>>(d_array);
increment_atomic<<<NUM_THREADS/BLOCK_WIDTH, BLOCK_WIDTH>>>(d_array);
timer.Stop();
cudaMemcpy(h_array,d_array,ARRAY_BYTES, cudaMemcpyDeviceToHost);
//print_array(h_array, ARRAY_SIZE);
printf("Timer elapsed = %g ms\n", timer.Elapsed());
cudaFree(d_array);
return 0;
}
|
b7709bfd67ce45987d4e6e782586d78bc030b1ea.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <iostream>
using namespace std;
#include <ctime>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include "microMC_chem.h"
ChemistrySpec::ChemistrySpec()
{
}
ChemistrySpec::~ChemistrySpec()
{
}
void ChemistrySpec::initChemistry(string fileprefix)
{
string fname;
fname = fileprefix;
fname += string("/RadiolyticSpecies.txt");
readRadiolyticSpecies(fname);
}
void ChemistrySpec::readRadiolyticSpecies(string fname)
{
char buffer[256];
FILE *fp = fopen(fname.c_str(), "r");
if (fp == NULL)
{
printf("The file in line 37 was not opened\n");
exit(1);
}
fgets(buffer, 100, fp);
fscanf(fp, "%d\n", &numSpecType);
printf("%s\n", buffer);
printf("%d\n", numSpecType);
diffCoef_spec = new float[numSpecType];
radii_spec = new float[numSpecType];
fgets(buffer, 100, fp);
char specName[50];
int temp;
maxDiffCoef_spec = 0;
for (int i = 0; i < numSpecType; i++)
{
fscanf(fp, "%d %s %f %f\n", &temp, specName, &diffCoef_spec[i], &radii_spec[i]);
diffCoef_spec[i] = diffCoef_spec[i] * 1.0e-3f; // convert the diffusion coefficient from 1e-9 m^2/s to nm^2/ps
Name_spec.push_back(specName);
if (maxDiffCoef_spec < diffCoef_spec[i])
maxDiffCoef_spec = diffCoef_spec[i];
// printf("i = %d, name of the particle is %s, diffusion coefficient is %e\n", i, Name_spec[i].c_str(), diffCoef_spec[i]);
}
fclose(fp);
}
ReactionType::ReactionType()
{
h_deltaT_adap[0] = 0.1f;
h_deltaT_adap[1] = 1.0f;
h_deltaT_adap[2] = 3.0f;
h_deltaT_adap[3] = 10.0f;
h_deltaT_adap[4] = 100.0f;
max_calc_radii_React[0] = 0.f;
max_calc_radii_React[1] = 0.f;
max_calc_radii_React[2] = 0.f;
max_calc_radii_React[3] = 0.f;
max_calc_radii_React[4] = 0.f;
}
ReactionType::~ReactionType()
{
}
void ReactionType::initReaction(ChemistrySpec chemistrySpec, string fileprefix)
{
string fname;
fname = fileprefix;
fname += string("_ReactionInfo_orig.txt");
readReactionTypeInfo(chemistrySpec, fname);
setReactantList_Par(chemistrySpec);
calcReactionRadius(chemistrySpec);
}
void ReactionType::readReactionTypeInfo(ChemistrySpec chemistrySpec, string fname)
{
char buffer[256];
FILE *fp = fopen(fname.c_str(), "r");
if (fp == NULL)
{
printf("The file in line 116 was not opened\n");
exit(1);
}
fgets(buffer, 100, fp);
fscanf(fp, "%d\n", &numReact);
printf("%s\n", buffer);
printf("%d\n", numReact);
fgets(buffer, 200, fp);
int k1 = 0;
int k2 = 0;
int temp;
for (int i = 0; i < numReact; i++)
{
diffCoef_total_React[i] = 0.0f;
fscanf(fp, "%d ", &temp);
printf("i = %d, ", i);
fscanf(fp, "%d ", &numReactant_React[i]);
indexReactant_React[i] = k1;
for (int j = 0; j < numReactant_React[i]; j++)
{
fscanf(fp, "%d ", &typeReactant_React[k1]);
printf("%s ", chemistrySpec.Name_spec[typeReactant_React[k1]].c_str());
if(j < numReactant_React[i]-1) printf("+");//*/
diffCoef_total_React[i] += chemistrySpec.diffCoef_spec[typeReactant_React[k1]];
k1++;
}
fscanf(fp, "%d ", &numNewPar_React[i]);
indexNewPar_React[i] = k2;
printf(" = ");
for (int j = 0; j < numNewPar_React[i]; j++)
{
fscanf(fp, "%d ", &typeNewPar_React[k2]);
if(typeNewPar_React[k2]!=255)
printf("%s ", chemistrySpec.Name_spec[typeNewPar_React[k2]].c_str());
else
printf("H2O ");
if(j < numNewPar_React[i]-1) printf("+");//*/
k2++;
}
fscanf(fp, "%e %f %f\n", &kobs_React[i], &radii_React[i], &prob_React[i]);
printf("%e\n", kobs_React[i]);
//printf("%e %f %f\n", kobs_React[i], radii_React[i], prob_React[i]);
}
indexReactant_React[numReact] = k1;
indexNewPar_React[numReact] = k2;
cout<<"total number of new particles: "<<indexNewPar_React[numReact]<<endl;
}
void ReactionType::setReactantList_Par(ChemistrySpec chemistrySpec)
//---------------------------------------------------------------------------------------
//reorganize the data for each type of particle, for searching the neighbors that can
//have a reaction with the current particle
//----------------------------------------------------------------------------------------
{
int i, j, k, idx;
int tempParType;
for (i = 0; i < chemistrySpec.numSpecType; i++)
{
numReactant_Par[i] = 0;
}
for (i = 0; i < numReact; i++)
{
for (j = indexReactant_React[i]; j < indexReactant_React[i+1]; j++)
{
tempParType = typeReactant_React[j];
for (k = indexReactant_React[i]; k < indexReactant_React[i + 1]; k++)
{
if (k != j)
{
if (numReactant_Par[tempParType] == 0)
{
typeReactant_Par[tempParType * MAXNUMREACTANT4PAR] = typeReactant_React[k];
subtypeReact_Par[tempParType * MAXNUMREACTANT4PAR] = i;
numReactant_Par[tempParType]++;
}
if (numReactant_Par[tempParType] > 0)
{
for (idx = 0; idx < numReactant_Par[tempParType]; idx++)
{
if (typeReactant_React[k] == typeReactant_Par[tempParType * MAXNUMREACTANT4PAR + idx])
break;
}
if (idx == numReactant_Par[tempParType])
{
typeReactant_Par[tempParType * MAXNUMREACTANT4PAR + numReactant_Par[tempParType]] = typeReactant_React[k];
subtypeReact_Par[tempParType * MAXNUMREACTANT4PAR + numReactant_Par[tempParType]] = i;
numReactant_Par[tempParType]++;
}
}
}
}
}
}
}
void ReactionType::calcReactionRadius(ChemistrySpec chemistrySpec)
{
int ireact, ideltaT;
float radii;
float temp;
for(ireact = 0; ireact < numReact; ireact++)
{
radii = kobs_React[ireact]/1e10/756.8725/diffCoef_total_React[ireact];// radii_React[ireact];
//printf("reaction %d, radii=%f\n", ireact, radii);
temp = sqrt(PI * diffCoef_total_React[ireact]* h_deltaT_adap[NUMDIFFTIMESTEPS-1]);
radii = radii*(1.0f + radii/temp);
for(ideltaT = 0; ideltaT < NUMDIFFTIMESTEPS; ideltaT++)
{
temp = sqrt(PI * diffCoef_total_React[ireact] * h_deltaT_adap[ideltaT]);
// calc_radii_React[ireact * NUMDIFFTIMESTEPS + ideltaT] = radii;
//calc_radii_React[ireact * NUMDIFFTIMESTEPS + ideltaT] = radii * 0.5f + sqrt(radii * radii * 0.25f + radii * temp);
calc_radii_React[ireact * NUMDIFFTIMESTEPS + ideltaT] = sqrt(temp * temp * 0.25f + temp * radii) - temp * 0.5f;
if(max_calc_radii_React[ideltaT] < calc_radii_React[ireact * NUMDIFFTIMESTEPS + ideltaT])
max_calc_radii_React[ideltaT] = calc_radii_React[ireact * NUMDIFFTIMESTEPS + ideltaT];
//printf("ireact = %d, ideltaT = %d, calc_radii_React = %f\n", ireact, ideltaT, calc_radii_React[ireact * NUMDIFFTIMESTEPS + ideltaT]);
}
}
}
ParticleData::ParticleData()
{
posx = new float[MAXNUMPAR];
posy = new float[MAXNUMPAR];
posz = new float[MAXNUMPAR];
ptype = new unsigned char[MAXNUMPAR];
index= new int[MAXNUMPAR];
ttime = new float[MAXNUMPAR];
converTable[0] = 0;
converTable[1] = 1;
converTable[2] = 2;
converTable[3] = 3;
converTable[4] = 4;
converTable[5] = 5;
converTable[6] = 6;
converTable[7] = 7;
converTable[8] = 8;
converTable[9] = 9;
}
ParticleData::~ParticleData()
{
delete[] posx;
delete[] posy;
delete[] posz;
delete[] ptype;
delete[] index;
delete[] ttime;
}
void ParticleData::readInitialParticles_RITRACK(string fname)
{
FILE *fp = fopen(fname.c_str(), "r");
if (fp == NULL)
{
printf("The file in line 390 was not opened\n");
exit(1);
}
printf("%s\n", fname.c_str());
char buffer[256];
fgets(buffer, 100, fp);
printf("%s\n", buffer);
fscanf(fp, "%d\n", &initnumPar);
printf("%d\n", initnumPar);
fgets(buffer, 100, fp);
//printf("%s\n", buffer);
initTime = 0.0f;
float tempTime;
int tempPtype;
//FILE *fp1 = fopen("test.txt", "w");
int k = 0;
float posx_temp, posy_temp, posz_temp;
for (int i = 0; i < initnumPar; i++)
{
fscanf(fp, "%e %e %e %d %e", &posx_temp, &posy_temp, &posz_temp, &tempPtype, &tempTime);
//printf("%e %e %e %d %e\n", posx_temp, posy_temp, posz_temp, tempPtype, tempTime);
if(converTable[tempPtype-1]!= -1)
{
ptype[k] = converTable[tempPtype-1];
posx[k] = posx_temp * 0.1f; // Angstrom to nm
posy[k] = posy_temp * 0.1f;
posz[k] = posz_temp * 0.1f;
//fprintf(fp1, "%d %d %f %f %f\n", k, ptype[k], posx[k], posy[k], posz[k]);
//printf("%d %d %f %f %f %e\n", k, ptype[k], posx[k], posy[k], posz[k], tempTime);
k++;
}
if(initTime < tempTime)
initTime = tempTime;
}
initnumPar = k;
printf("%d\n", initnumPar);
printf("initTime = %e\n", initTime);
fclose(fp);
}
void ParticleData::readInitialParticles_GEANT4(string fname) // load the results obtained from geant4-DNA
{
FILE *fp = fopen(fname.c_str(), "rb");
fseek(fp,0,SEEK_END);
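// the binary file stores 6 four-byte fields per particle (posx, posy, posz, time, index, type), i.e. 24 bytes each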
initnumPar = ftell(fp)/24;
printf("Number of loaded particles is %d\n", initnumPar);
fseek(fp,0,SEEK_SET);
fread(posx, sizeof(float),initnumPar, fp);
fread(posy, sizeof(float),initnumPar, fp);
fread(posz, sizeof(float),initnumPar, fp);
fread(ttime, sizeof(float),initnumPar, fp);
fread(index, sizeof(int),initnumPar, fp);
for(int i=0;i<initnumPar;i++)
fread(&(ptype[i]), sizeof(int),1, fp);
fclose(fp);
}
void initGPUVariables(ChemistrySpec *chemistrySpec, ReactionType *reactType, ParticleData *parData)
{
//gpu variables from ChemistrySpec class
printf("Start GPU memory initialization\n");
CUDA_CALL(hipMemcpyToSymbol(d_diffCoef_spec, chemistrySpec->diffCoef_spec, sizeof(float)*chemistrySpec->numSpecType, 0, hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpyToSymbol(d_radii_spec, chemistrySpec->radii_spec, sizeof(float)*chemistrySpec->numSpecType, 0, hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpyToSymbol(d_maxDiffCoef_spec, chemistrySpec->diffCoef_spec, sizeof(float), 0, hipMemcpyHostToDevice));
//gpu variables from ReactionType class
CUDA_CALL(hipMemcpyToSymbol(d_numReactant_React, reactType->numReactant_React, sizeof(int)*reactType->numReact, 0, hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpyToSymbol(d_indexReactant_React, reactType->indexReactant_React, sizeof(int)*(reactType->numReact + 1), 0, hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpyToSymbol(d_typeReactant_React, reactType->typeReactant_React, sizeof(int)*reactType->indexReactant_React[reactType->numReact], 0, hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpyToSymbol(d_numNewPar_React, reactType->numNewPar_React, sizeof(int)*reactType->numReact, 0, hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpyToSymbol(d_indexNewPar_React, reactType->indexNewPar_React, sizeof(int)*(reactType->numReact + 1), 0, hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpyToSymbol(d_typeNewPar_React, reactType->typeNewPar_React, sizeof(int)*reactType->indexNewPar_React[reactType->numReact], 0, hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpyToSymbol(d_numReactant_Par, reactType->numReactant_Par, sizeof(float)*chemistrySpec->numSpecType, 0, hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpyToSymbol(d_typeReactant_Par, reactType->typeReactant_Par, sizeof(float)*chemistrySpec->numSpecType*MAXNUMREACTANT4PAR, 0, hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpyToSymbol(d_subtypeReact_Par, reactType->subtypeReact_Par, sizeof(float)*chemistrySpec->numSpecType*MAXNUMREACTANT4PAR, 0, hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpyToSymbol(d_kobs_React, reactType->kobs_React, sizeof(float)*reactType->numReact, 0, hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpyToSymbol(d_calc_radii_React, reactType->calc_radii_React, sizeof(float)*reactType->numReact * NUMDIFFTIMESTEPS, 0, hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpyToSymbol(d_prob_React, reactType->prob_React, sizeof(float)*reactType->numReact, 0, hipMemcpyHostToDevice));
//gpu variables from ParticleData class
numCurPar = parData->initnumPar;
iniPar = int(numCurPar * 2.1);
CUDA_CALL(hipMalloc((void **) &d_posx, sizeof(float)* iniPar));
CUDA_CALL(hipMemcpy(d_posx, parData->posx, sizeof(float)*numCurPar, hipMemcpyHostToDevice));
CUDA_CALL(hipMalloc((void **) &d_posy, sizeof(float)*iniPar));
CUDA_CALL(hipMemcpy(d_posy, parData->posy, sizeof(float)*numCurPar, hipMemcpyHostToDevice));
CUDA_CALL(hipMalloc((void **) &d_posz, sizeof(float)*iniPar));
CUDA_CALL(hipMemcpy(d_posz, parData->posz, sizeof(float)*numCurPar, hipMemcpyHostToDevice));
CUDA_CALL(hipMalloc((void **) &d_ptype, sizeof(unsigned char)*iniPar));
CUDA_CALL(hipMemset(d_ptype, 255, sizeof(unsigned char) * iniPar));
CUDA_CALL(hipMemcpy(d_ptype, parData->ptype, sizeof(unsigned char)*numCurPar, hipMemcpyHostToDevice));
CUDA_CALL(hipMalloc((void **) &d_index, sizeof(int)*iniPar));
CUDA_CALL(hipMemcpy(d_index, parData->index, sizeof(int)*numCurPar, hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(d_index+numCurPar, parData->index, sizeof(int)*numCurPar, hipMemcpyHostToDevice));
CUDA_CALL(hipMalloc((void **) &d_ttime, sizeof(float)*iniPar));
CUDA_CALL(hipMemcpy(d_ttime, parData->ttime, sizeof(float)*numCurPar, hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(d_ttime+numCurPar, parData->ttime, sizeof(float)*numCurPar, hipMemcpyHostToDevice));
CUDA_CALL(hipMalloc((void **) &d_posx_s, sizeof(float)*int(numCurPar * 1.5)));
CUDA_CALL(hipMalloc((void **) &d_posy_s, sizeof(float)*int(numCurPar * 1.5)));
CUDA_CALL(hipMalloc((void **) &d_posz_s, sizeof(float)*int(numCurPar * 1.5)));
CUDA_CALL(hipMalloc((void **) &d_ptype_s, sizeof(unsigned int)*int(numCurPar * 1.5)));
CUDA_CALL(hipMalloc((void **) &d_posx_d, sizeof(float)*int(numCurPar * 1.5)));
CUDA_CALL(hipMalloc((void **) &d_posy_d, sizeof(float)*int(numCurPar * 1.5)));
CUDA_CALL(hipMalloc((void **) &d_posz_d, sizeof(float)*int(numCurPar * 1.5)));
CUDA_CALL(hipMalloc((void **) &d_posx_sd, sizeof(float)*int(numCurPar * 1.5)));
CUDA_CALL(hipMalloc((void **) &d_posy_sd, sizeof(float)*int(numCurPar * 1.5)));
CUDA_CALL(hipMalloc((void **) &d_posz_sd, sizeof(float)*int(numCurPar * 1.5)));
CUDA_CALL(hipMalloc((void **) &d_gridParticleHash, sizeof(unsigned long)*int(numCurPar * 1.5)));
CUDA_CALL(hipMalloc((void **) &d_gridParticleIndex, sizeof(int)*int(numCurPar * 1.5)));
CUDA_CALL(hipMalloc((void **) &d_accumParidxBin, sizeof(int)* (MAXNUMNZBIN + 1)));
CUDA_CALL(hipMalloc((void **) &d_nzBinidx, sizeof(unsigned long)* MAXNUMNZBIN));
CUDA_CALL(hipMalloc((void **) &d_idxnzBin_neig, sizeof(int)* MAXNUMNZBIN * 27));
CUDA_CALL(hipMalloc((void **) &d_idxnzBin_numNeig, sizeof(int)* MAXNUMNZBIN));
CUDA_CALL(hipMalloc((void **) &d_mintd_Par, sizeof(float)*int(numCurPar * 1.5)));
h_mintd_Par_init = new float[int(numCurPar * 1.5)];
for(int i = 0; i< int(numCurPar * 1.5); i++)
{
h_mintd_Par_init[i] = 1.0e6f;
}
int tempNumNewPar = 0;
CUDA_CALL(hipMemcpyToSymbol(d_numNewPar, &tempNumNewPar, sizeof(int), 0, hipMemcpyHostToDevice));
CUDA_CALL(hipMalloc((void **) &d_statusPar, sizeof(unsigned char)*iniPar));
iniCurPar = numCurPar;
float aa=clock();
inirngG(0);
float bb=clock();
printf("initialization of rand is %f\n", (bb-aa)/CLOCKS_PER_SEC);
printf("Finish initialization of random number\n");
}
| b7709bfd67ce45987d4e6e782586d78bc030b1ea.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <iostream>
using namespace std;
#include <ctime>
#include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include "microMC_chem.h"
ChemistrySpec::ChemistrySpec()
{
}
ChemistrySpec::~ChemistrySpec()
{
}
void ChemistrySpec::initChemistry(string fileprefix)
{
string fname;
fname = fileprefix;
fname += string("/RadiolyticSpecies.txt");
readRadiolyticSpecies(fname);
}
void ChemistrySpec::readRadiolyticSpecies(string fname)
{
char buffer[256];
FILE *fp = fopen(fname.c_str(), "r");
if (fp == NULL)
{
printf("The file in line 37 was not opened\n");
exit(1);
}
fgets(buffer, 100, fp);
fscanf(fp, "%d\n", &numSpecType);
printf("%s\n", buffer);
printf("%d\n", numSpecType);
diffCoef_spec = new float[numSpecType];
radii_spec = new float[numSpecType];
fgets(buffer, 100, fp);
char specName[50];
int temp;
maxDiffCoef_spec = 0;
for (int i = 0; i < numSpecType; i++)
{
fscanf(fp, "%d %s %f %f\n", &temp, specName, &diffCoef_spec[i], &radii_spec[i]);
diffCoef_spec[i] = diffCoef_spec[i] * 1.0e-3f; // convert the diffusion coefficient from 1e-9 m^2/s to nm^2/ps
Name_spec.push_back(specName);
if (maxDiffCoef_spec < diffCoef_spec[i])
maxDiffCoef_spec = diffCoef_spec[i];
// printf("i = %d, name of the particle is %s, diffusion coefficient is %e\n", i, Name_spec[i].c_str(), diffCoef_spec[i]);
}
fclose(fp);
}
ReactionType::ReactionType()
{
h_deltaT_adap[0] = 0.1f;
h_deltaT_adap[1] = 1.0f;
h_deltaT_adap[2] = 3.0f;
h_deltaT_adap[3] = 10.0f;
h_deltaT_adap[4] = 100.0f;
max_calc_radii_React[0] = 0.f;
max_calc_radii_React[1] = 0.f;
max_calc_radii_React[2] = 0.f;
max_calc_radii_React[3] = 0.f;
max_calc_radii_React[4] = 0.f;
}
ReactionType::~ReactionType()
{
}
void ReactionType::initReaction(ChemistrySpec chemistrySpec, string fileprefix)
{
string fname;
fname = fileprefix;
fname += string("_ReactionInfo_orig.txt");
readReactionTypeInfo(chemistrySpec, fname);
setReactantList_Par(chemistrySpec);
calcReactionRadius(chemistrySpec);
}
void ReactionType::readReactionTypeInfo(ChemistrySpec chemistrySpec, string fname)
{
char buffer[256];
FILE *fp = fopen(fname.c_str(), "r");
if (fp == NULL)
{
printf("The file in line 116 was not opened\n");
exit(1);
}
fgets(buffer, 100, fp);
fscanf(fp, "%d\n", &numReact);
printf("%s\n", buffer);
printf("%d\n", numReact);
fgets(buffer, 200, fp);
int k1 = 0;
int k2 = 0;
int temp;
for (int i = 0; i < numReact; i++)
{
diffCoef_total_React[i] = 0.0f;
fscanf(fp, "%d ", &temp);
printf("i = %d, ", i);
fscanf(fp, "%d ", &numReactant_React[i]);
indexReactant_React[i] = k1;
for (int j = 0; j < numReactant_React[i]; j++)
{
fscanf(fp, "%d ", &typeReactant_React[k1]);
printf("%s ", chemistrySpec.Name_spec[typeReactant_React[k1]].c_str());
if(j < numReactant_React[i]-1) printf("+");//*/
diffCoef_total_React[i] += chemistrySpec.diffCoef_spec[typeReactant_React[k1]];
k1++;
}
fscanf(fp, "%d ", &numNewPar_React[i]);
indexNewPar_React[i] = k2;
printf(" = ");
for (int j = 0; j < numNewPar_React[i]; j++)
{
fscanf(fp, "%d ", &typeNewPar_React[k2]);
if(typeNewPar_React[k2]!=255)
printf("%s ", chemistrySpec.Name_spec[typeNewPar_React[k2]].c_str());
else
printf("H2O ");
if(j < numNewPar_React[i]-1) printf("+");//*/
k2++;
}
fscanf(fp, "%e %f %f\n", &kobs_React[i], &radii_React[i], &prob_React[i]);
printf("%e\n", kobs_React[i]);
//printf("%e %f %f\n", kobs_React[i], radii_React[i], prob_React[i]);
}
indexReactant_React[numReact] = k1;
indexNewPar_React[numReact] = k2;
cout<<"total number of new particles: "<<indexNewPar_React[numReact]<<endl;
}
void ReactionType::setReactantList_Par(ChemistrySpec chemistrySpec)
//---------------------------------------------------------------------------------------
//reorganize the data for each type of particle, for searching the neighbors that can
//have a reaction with the current particle
//----------------------------------------------------------------------------------------
{
int i, j, k, idx;
int tempParType;
for (i = 0; i < chemistrySpec.numSpecType; i++)
{
numReactant_Par[i] = 0;
}
for (i = 0; i < numReact; i++)
{
for (j = indexReactant_React[i]; j < indexReactant_React[i+1]; j++)
{
tempParType = typeReactant_React[j];
for (k = indexReactant_React[i]; k < indexReactant_React[i + 1]; k++)
{
if (k != j)
{
if (numReactant_Par[tempParType] == 0)
{
typeReactant_Par[tempParType * MAXNUMREACTANT4PAR] = typeReactant_React[k];
subtypeReact_Par[tempParType * MAXNUMREACTANT4PAR] = i;
numReactant_Par[tempParType]++;
}
if (numReactant_Par[tempParType] > 0)
{
for (idx = 0; idx < numReactant_Par[tempParType]; idx++)
{
if (typeReactant_React[k] == typeReactant_Par[tempParType * MAXNUMREACTANT4PAR + idx])
break;
}
if (idx == numReactant_Par[tempParType])
{
typeReactant_Par[tempParType * MAXNUMREACTANT4PAR + numReactant_Par[tempParType]] = typeReactant_React[k];
subtypeReact_Par[tempParType * MAXNUMREACTANT4PAR + numReactant_Par[tempParType]] = i;
numReactant_Par[tempParType]++;
}
}
}
}
}
}
}
void ReactionType::calcReactionRadius(ChemistrySpec chemistrySpec)
{
int ireact, ideltaT;
float radii;
float temp;
for(ireact = 0; ireact < numReact; ireact++)
{
radii = kobs_React[ireact]/1e10/756.8725/diffCoef_total_React[ireact];// radii_React[ireact];
//printf("reaction %d, radii=%f\n", ireact, radii);
temp = sqrt(PI * diffCoef_total_React[ireact]* h_deltaT_adap[NUMDIFFTIMESTEPS-1]);
radii = radii*(1.0f + radii/temp);
for(ideltaT = 0; ideltaT < NUMDIFFTIMESTEPS; ideltaT++)
{
temp = sqrt(PI * diffCoef_total_React[ireact] * h_deltaT_adap[ideltaT]);
// calc_radii_React[ireact * NUMDIFFTIMESTEPS + ideltaT] = radii;
//calc_radii_React[ireact * NUMDIFFTIMESTEPS + ideltaT] = radii * 0.5f + sqrt(radii * radii * 0.25f + radii * temp);
calc_radii_React[ireact * NUMDIFFTIMESTEPS + ideltaT] = sqrt(temp * temp * 0.25f + temp * radii) - temp * 0.5f;
if(max_calc_radii_React[ideltaT] < calc_radii_React[ireact * NUMDIFFTIMESTEPS + ideltaT])
max_calc_radii_React[ideltaT] = calc_radii_React[ireact * NUMDIFFTIMESTEPS + ideltaT];
//printf("ireact = %d, ideltaT = %d, calc_radii_React = %f\n", ireact, ideltaT, calc_radii_React[ireact * NUMDIFFTIMESTEPS + ideltaT]);
}
}
}
ParticleData::ParticleData()
{
posx = new float[MAXNUMPAR];
posy = new float[MAXNUMPAR];
posz = new float[MAXNUMPAR];
ptype = new unsigned char[MAXNUMPAR];
index= new int[MAXNUMPAR];
ttime = new float[MAXNUMPAR];
converTable[0] = 0;
converTable[1] = 1;
converTable[2] = 2;
converTable[3] = 3;
converTable[4] = 4;
converTable[5] = 5;
converTable[6] = 6;
converTable[7] = 7;
converTable[8] = 8;
converTable[9] = 9;
}
ParticleData::~ParticleData()
{
delete[] posx;
delete[] posy;
delete[] posz;
delete[] ptype;
delete[] index;
delete[] ttime;
}
void ParticleData::readInitialParticles_RITRACK(string fname)
{
FILE *fp = fopen(fname.c_str(), "r");
if (fp == NULL)
{
printf("The file in line 390 was not opened\n");
exit(1);
}
printf("%s\n", fname.c_str());
char buffer[256];
fgets(buffer, 100, fp);
printf("%s\n", buffer);
fscanf(fp, "%d\n", &initnumPar);
printf("%d\n", initnumPar);
fgets(buffer, 100, fp);
//printf("%s\n", buffer);
initTime = 0.0f;
float tempTime;
int tempPtype;
//FILE *fp1 = fopen("test.txt", "w");
int k = 0;
float posx_temp, posy_temp, posz_temp;
for (int i = 0; i < initnumPar; i++)
{
fscanf(fp, "%e %e %e %d %e", &posx_temp, &posy_temp, &posz_temp, &tempPtype, &tempTime);
//printf("%e %e %e %d %e\n", posx_temp, posy_temp, posz_temp, tempPtype, tempTime);
if(converTable[tempPtype-1]!= -1)
{
ptype[k] = converTable[tempPtype-1];
posx[k] = posx_temp * 0.1f; // Angstrom to nm
posy[k] = posy_temp * 0.1f;
posz[k] = posz_temp * 0.1f;
//fprintf(fp1, "%d %d %f %f %f\n", k, ptype[k], posx[k], posy[k], posz[k]);
//printf("%d %d %f %f %f %e\n", k, ptype[k], posx[k], posy[k], posz[k], tempTime);
k++;
}
if(initTime < tempTime)
initTime = tempTime;
}
initnumPar = k;
printf("%d\n", initnumPar);
printf("initTime = %e\n", initTime);
fclose(fp);
}
void ParticleData::readInitialParticles_GEANT4(string fname) // load the results obtained from geant4-DNA
{
FILE *fp = fopen(fname.c_str(), "rb");
fseek(fp,0,SEEK_END);
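// the binary file stores 6 four-byte fields per particle (posx, posy, posz, time, index, type), i.e. 24 bytes each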
initnumPar = ftell(fp)/24;
printf("Number of loaded particles is %d\n", initnumPar);
fseek(fp,0,SEEK_SET);
fread(posx, sizeof(float),initnumPar, fp);
fread(posy, sizeof(float),initnumPar, fp);
fread(posz, sizeof(float),initnumPar, fp);
fread(ttime, sizeof(float),initnumPar, fp);
fread(index, sizeof(int),initnumPar, fp);
for(int i=0;i<initnumPar;i++)
fread(&(ptype[i]), sizeof(int),1, fp);
fclose(fp);
}
void initGPUVariables(ChemistrySpec *chemistrySpec, ReactionType *reactType, ParticleData *parData)
{
//gpu variables from ChemistrySpec class
printf("Start GPU memory initialization\n");
CUDA_CALL(cudaMemcpyToSymbol(d_diffCoef_spec, chemistrySpec->diffCoef_spec, sizeof(float)*chemistrySpec->numSpecType, 0, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpyToSymbol(d_radii_spec, chemistrySpec->radii_spec, sizeof(float)*chemistrySpec->numSpecType, 0, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpyToSymbol(d_maxDiffCoef_spec, chemistrySpec->diffCoef_spec, sizeof(float), 0, cudaMemcpyHostToDevice));
//gpu variables from ReactionType class
CUDA_CALL(cudaMemcpyToSymbol(d_numReactant_React, reactType->numReactant_React, sizeof(int)*reactType->numReact, 0, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpyToSymbol(d_indexReactant_React, reactType->indexReactant_React, sizeof(int)*(reactType->numReact + 1), 0, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpyToSymbol(d_typeReactant_React, reactType->typeReactant_React, sizeof(int)*reactType->indexReactant_React[reactType->numReact], 0, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpyToSymbol(d_numNewPar_React, reactType->numNewPar_React, sizeof(int)*reactType->numReact, 0, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpyToSymbol(d_indexNewPar_React, reactType->indexNewPar_React, sizeof(int)*(reactType->numReact + 1), 0, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpyToSymbol(d_typeNewPar_React, reactType->typeNewPar_React, sizeof(int)*reactType->indexNewPar_React[reactType->numReact], 0, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpyToSymbol(d_numReactant_Par, reactType->numReactant_Par, sizeof(float)*chemistrySpec->numSpecType, 0, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpyToSymbol(d_typeReactant_Par, reactType->typeReactant_Par, sizeof(float)*chemistrySpec->numSpecType*MAXNUMREACTANT4PAR, 0, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpyToSymbol(d_subtypeReact_Par, reactType->subtypeReact_Par, sizeof(float)*chemistrySpec->numSpecType*MAXNUMREACTANT4PAR, 0, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpyToSymbol(d_kobs_React, reactType->kobs_React, sizeof(float)*reactType->numReact, 0, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpyToSymbol(d_calc_radii_React, reactType->calc_radii_React, sizeof(float)*reactType->numReact * NUMDIFFTIMESTEPS, 0, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpyToSymbol(d_prob_React, reactType->prob_React, sizeof(float)*reactType->numReact, 0, cudaMemcpyHostToDevice));
//gpu variables from ParticleData class
numCurPar = parData->initnumPar;
iniPar = int(numCurPar * 2.1);
CUDA_CALL(cudaMalloc((void **) &d_posx, sizeof(float)* iniPar));
CUDA_CALL(cudaMemcpy(d_posx, parData->posx, sizeof(float)*numCurPar, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMalloc((void **) &d_posy, sizeof(float)*iniPar));
CUDA_CALL(cudaMemcpy(d_posy, parData->posy, sizeof(float)*numCurPar, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMalloc((void **) &d_posz, sizeof(float)*iniPar));
CUDA_CALL(cudaMemcpy(d_posz, parData->posz, sizeof(float)*numCurPar, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMalloc((void **) &d_ptype, sizeof(unsigned char)*iniPar));
CUDA_CALL(cudaMemset(d_ptype, 255, sizeof(unsigned char) * iniPar));
CUDA_CALL(cudaMemcpy(d_ptype, parData->ptype, sizeof(unsigned char)*numCurPar, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMalloc((void **) &d_index, sizeof(int)*iniPar));
CUDA_CALL(cudaMemcpy(d_index, parData->index, sizeof(int)*numCurPar, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(d_index+numCurPar, parData->index, sizeof(int)*numCurPar, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMalloc((void **) &d_ttime, sizeof(float)*iniPar));
CUDA_CALL(cudaMemcpy(d_ttime, parData->ttime, sizeof(float)*numCurPar, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(d_ttime+numCurPar, parData->ttime, sizeof(float)*numCurPar, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMalloc((void **) &d_posx_s, sizeof(float)*int(numCurPar * 1.5)));
CUDA_CALL(cudaMalloc((void **) &d_posy_s, sizeof(float)*int(numCurPar * 1.5)));
CUDA_CALL(cudaMalloc((void **) &d_posz_s, sizeof(float)*int(numCurPar * 1.5)));
CUDA_CALL(cudaMalloc((void **) &d_ptype_s, sizeof(unsigned int)*int(numCurPar * 1.5)));
CUDA_CALL(cudaMalloc((void **) &d_posx_d, sizeof(float)*int(numCurPar * 1.5)));
CUDA_CALL(cudaMalloc((void **) &d_posy_d, sizeof(float)*int(numCurPar * 1.5)));
CUDA_CALL(cudaMalloc((void **) &d_posz_d, sizeof(float)*int(numCurPar * 1.5)));
CUDA_CALL(cudaMalloc((void **) &d_posx_sd, sizeof(float)*int(numCurPar * 1.5)));
CUDA_CALL(cudaMalloc((void **) &d_posy_sd, sizeof(float)*int(numCurPar * 1.5)));
CUDA_CALL(cudaMalloc((void **) &d_posz_sd, sizeof(float)*int(numCurPar * 1.5)));
CUDA_CALL(cudaMalloc((void **) &d_gridParticleHash, sizeof(unsigned long)*int(numCurPar * 1.5)));
CUDA_CALL(cudaMalloc((void **) &d_gridParticleIndex, sizeof(int)*int(numCurPar * 1.5)));
CUDA_CALL(cudaMalloc((void **) &d_accumParidxBin, sizeof(int)* (MAXNUMNZBIN + 1)));
CUDA_CALL(cudaMalloc((void **) &d_nzBinidx, sizeof(unsigned long)* MAXNUMNZBIN));
CUDA_CALL(cudaMalloc((void **) &d_idxnzBin_neig, sizeof(int)* MAXNUMNZBIN * 27));
CUDA_CALL(cudaMalloc((void **) &d_idxnzBin_numNeig, sizeof(int)* MAXNUMNZBIN));
CUDA_CALL(cudaMalloc((void **) &d_mintd_Par, sizeof(float)*int(numCurPar * 1.5)));
h_mintd_Par_init = new float[int(numCurPar * 1.5)];
for(int i = 0; i< int(numCurPar * 1.5); i++)
{
h_mintd_Par_init[i] = 1.0e6f;
}
int tempNumNewPar = 0;
CUDA_CALL(cudaMemcpyToSymbol(d_numNewPar, &tempNumNewPar, sizeof(int), 0, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMalloc((void **) &d_statusPar, sizeof(unsigned char)*iniPar));
iniCurPar = numCurPar;
float aa=clock();
inirngG(0);
float bb=clock();
printf("initialization of rand is %f\n", (bb-aa)/CLOCKS_PER_SEC);
printf("Finish initialization of random number\n");
}
|
385f81c0b7189889745b9d28c0c378c7f87dd7be.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "CudaObject.h"
namespace gpu_cuda {
__global__ void calcReluForwardGPU(float *in, float *out, int elements)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if( id < elements ){
float v = in[id];
if ( v < 0 ){
v = 0.0;
}
out[id] = v;
}
/* original
for( unsigned i = 0; i < data_size; ++i ){
float v = in.data[i];
if ( v < 0 ){
v = 0;
}
out.data[i] = v;
}
*/
}
__global__ void calcReluBackwardGPU( float *dz_next_layer, float *dz_in, float *dz, float *in, int elements )
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if( id < elements ){
dz_in[id] += dz_next_layer[id];
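// ReLU gradient: the accumulated upstream gradient is passed through only where the forward input was non-negative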
dz[id] += (in[id] < 0) ? (0) : (1.0 * dz_in[id]);
}
/* original
for( unsigned i = 0; i < data_size; ++i ){
dz_in.data[i] += dz_next_layer.data[i];
dz.data[i] += (in.data[i] < 0) ? (0) : (1.0 * dz_in.data[i]);
}
*/
}
void reluForwardGPU(float *in, float *out, int N)
{
CudaObject cuda = CudaObject();
dim3 grid = cuda.cudaGridSize(N);
hipLaunchKernelGGL(( calcReluForwardGPU), dim3(grid), dim3(BLOCK), 0, 0, in, out, N);
}
void reluBackwardGPU( float *dz_next_layer, float *dz_in, float *dz, float *in, int N )
{
CudaObject cuda = CudaObject();
dim3 grid = cuda.cudaGridSize(N);
hipLaunchKernelGGL(( calcReluBackwardGPU), dim3(grid), dim3(BLOCK), 0, 0, dz_next_layer, dz_in, dz, in, N );
}
} // namespace gpu
| 385f81c0b7189889745b9d28c0c378c7f87dd7be.cu | #include <stdio.h>
#include "CudaObject.h"
namespace gpu_cuda {
__global__ void calcReluForwardGPU(float *in, float *out, int elements)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if( id < elements ){
float v = in[id];
if ( v < 0 ){
v = 0.0;
}
out[id] = v;
}
/* original
for( unsigned i = 0; i < data_size; ++i ){
float v = in.data[i];
if ( v < 0 ){
v = 0;
}
out.data[i] = v;
}
*/
}
__global__ void calcReluBackwardGPU( float *dz_next_layer, float *dz_in, float *dz, float *in, int elements )
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if( id < elements ){
dz_in[id] += dz_next_layer[id];
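// ReLU gradient: the accumulated upstream gradient is passed through only where the forward input was non-negative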
dz[id] += (in[id] < 0) ? (0) : (1.0 * dz_in[id]);
}
/* original
for( unsigned i = 0; i < data_size; ++i ){
dz_in.data[i] += dz_next_layer.data[i];
dz.data[i] += (in.data[i] < 0) ? (0) : (1.0 * dz_in.data[i]);
}
*/
}
void reluForwardGPU(float *in, float *out, int N)
{
CudaObject cuda = CudaObject();
dim3 grid = cuda.cudaGridSize(N);
calcReluForwardGPU<<<grid, BLOCK>>>(in, out, N);
}
void reluBackwardGPU( float *dz_next_layer, float *dz_in, float *dz, float *in, int N )
{
CudaObject cuda = CudaObject();
dim3 grid = cuda.cudaGridSize(N);
calcReluBackwardGPU<<<grid, BLOCK>>>( dz_next_layer, dz_in, dz, in, N );
}
} // namespace gpu
|
685d12f704d3e7e647d0ef5124325a5d0745616e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef RESAMPLE_BILINEAR
#define RESAMPLE_BILINEAR
#if __CUDACC_VER_MAJOR__ >= 9
#include <hip/hip_fp16.h>
#endif
#include "PrGPU/KernelSupport/KernelCore.h" //includes KernelWrapper.h
#include "PrGPU/KernelSupport/KernelMemory.h"
#if GF_DEVICE_TARGET_DEVICE
GF_KERNEL_FUNCTION(kHSAlphaStep,
((GF_PTR(float4))(inImgUVAvg))
((GF_PTR(float4))(inImgX))
((GF_PTR(float4))(inImgY))
((GF_PTR(float4))(inImgT))
((GF_PTR(float4))(destImg)),
((int)(inPitch))
((int)(destPitch))
((int)(alpha))
((int)(in16f))
((unsigned int)(outWidth))
((unsigned int)(outHeight)),
((uint2)(inXY)(KERNEL_XY)))
{
float4 uvAvg, dest;
float dX, dY, dT;
if (inXY.x >= outWidth || inXY.y >= outHeight) return;
dX = ReadFloat4(inImgX, inXY.y * inPitch + inXY.x, !!in16f).x;
dY = ReadFloat4(inImgY, inXY.y * inPitch + inXY.x, !!in16f).x;
dT = ReadFloat4(inImgT, inXY.y * inPitch + inXY.x, !!in16f).x;
uvAvg = ReadFloat4(inImgUVAvg, inXY.y * inPitch + inXY.x, !!in16f);
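// remove the flow component along the image gradient (dX, dY), damped by alpha^2 in the denominator (Horn-Schunck style update)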
dest.x = uvAvg.x - (dX*((dX * uvAvg.x) + (dY * uvAvg.y) + dT)) / (alpha* alpha + dX * dX + dY * dY);
dest.y = uvAvg.y - (dY*((dX * uvAvg.x) + (dY * uvAvg.y) + dT)) / (alpha*alpha + dX*dX + dY*dY);
dest.z = 0;
dest.w = 1.0;
dest.x = dest.x > 10000 ? 0 : dest.x;
dest.y = dest.y > 10000 ? 0 : dest.y;
dest.x = dest.x < -10000 ? 0 : dest.x;
dest.y = dest.y < -10000 ? 0 : dest.y;
WriteFloat4(dest, destImg, inXY.y * destPitch + inXY.x, !!in16f);
}
#endif
#if __NVCC__
void HSAlphaStep_CUDA (
float *inBufUVAvg,
float *inBufX,
float *inBufY,
float *inBufT,
float *destBuf,
int inPitch,
int destPitch,
int alpha,
int is16f,
unsigned int width,
unsigned int height)
{
dim3 blockDim (16, 16, 1);
dim3 gridDim ( (width + blockDim.x - 1)/ blockDim.x, (height + blockDim.y - 1) / blockDim.y, 1 );
hipLaunchKernelGGL(( kHSAlphaStep) , dim3(gridDim), dim3(blockDim), 0 , 0, (float4*)inBufUVAvg, (float4*)inBufX, (float4*)inBufY, (float4*)inBufT, (float4*) destBuf, inPitch, destPitch, alpha, is16f, width, height );
hipDeviceSynchronize();
}
#endif //GF_DEVICE_TARGET_HOST
#endif //SDK_CROSS_DISSOLVE
| 685d12f704d3e7e647d0ef5124325a5d0745616e.cu |
#ifndef RESAMPLE_BILINEAR
#define RESAMPLE_BILINEAR
#if __CUDACC_VER_MAJOR__ >= 9
#include <cuda_fp16.h>
#endif
#include "PrGPU/KernelSupport/KernelCore.h" //includes KernelWrapper.h
#include "PrGPU/KernelSupport/KernelMemory.h"
#if GF_DEVICE_TARGET_DEVICE
GF_KERNEL_FUNCTION(kHSAlphaStep,
((GF_PTR(float4))(inImgUVAvg))
((GF_PTR(float4))(inImgX))
((GF_PTR(float4))(inImgY))
((GF_PTR(float4))(inImgT))
((GF_PTR(float4))(destImg)),
((int)(inPitch))
((int)(destPitch))
((int)(alpha))
((int)(in16f))
((unsigned int)(outWidth))
((unsigned int)(outHeight)),
((uint2)(inXY)(KERNEL_XY)))
{
float4 uvAvg, dest;
float dX, dY, dT;
if (inXY.x >= outWidth || inXY.y >= outHeight) return;
dX = ReadFloat4(inImgX, inXY.y * inPitch + inXY.x, !!in16f).x;
dY = ReadFloat4(inImgY, inXY.y * inPitch + inXY.x, !!in16f).x;
dT = ReadFloat4(inImgT, inXY.y * inPitch + inXY.x, !!in16f).x;
uvAvg = ReadFloat4(inImgUVAvg, inXY.y * inPitch + inXY.x, !!in16f);
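// remove the flow component along the image gradient (dX, dY), damped by alpha^2 in the denominator (Horn-Schunck style update)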
dest.x = uvAvg.x - (dX*((dX * uvAvg.x) + (dY * uvAvg.y) + dT)) / (alpha* alpha + dX * dX + dY * dY);
dest.y = uvAvg.y - (dY*((dX * uvAvg.x) + (dY * uvAvg.y) + dT)) / (alpha*alpha + dX*dX + dY*dY);
dest.z = 0;
dest.w = 1.0;
dest.x = dest.x > 10000 ? 0 : dest.x;
dest.y = dest.y > 10000 ? 0 : dest.y;
dest.x = dest.x < -10000 ? 0 : dest.x;
dest.y = dest.y < -10000 ? 0 : dest.y;
WriteFloat4(dest, destImg, inXY.y * destPitch + inXY.x, !!in16f);
}
#endif
#if __NVCC__
void HSAlphaStep_CUDA (
float *inBufUVAvg,
float *inBufX,
float *inBufY,
float *inBufT,
float *destBuf,
int inPitch,
int destPitch,
int alpha,
int is16f,
unsigned int width,
unsigned int height)
{
dim3 blockDim (16, 16, 1);
dim3 gridDim ( (width + blockDim.x - 1)/ blockDim.x, (height + blockDim.y - 1) / blockDim.y, 1 );
kHSAlphaStep <<< gridDim, blockDim, 0 >>> ((float4*)inBufUVAvg, (float4*)inBufX, (float4*)inBufY, (float4*)inBufT, (float4*) destBuf, inPitch, destPitch, alpha, is16f, width, height );
cudaDeviceSynchronize();
}
#endif //GF_DEVICE_TARGET_HOST
#endif //SDK_CROSS_DISSOLVE
|
fdc74bc1821a2fe7207e97507ae73fcd01110a68.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
#include <stdlib.h>
#include "summation_kernel.cu"
// CPU implementation
float log2_series(int n) {
float res = 0;
for(int i = 0; i < n; i++) {
if(i% 2 == 0) {
res += 1.f/(i+1);
} else {
res -= 1.0f/(i+1);
}
}
return res;
}
int main(int argc, char ** argv) {
int data_size = 1024 * 1024 * 128;
// Run CPU version
double start_time = getclock();
float log2 = log2_series(data_size);
double end_time = getclock();
printf("CPU result: %f\n", log2);
printf(" log(2)=%f\n", log(2.0));
printf(" time=%fs\n", end_time - start_time);
// Parameter definition
int threads_per_block = 64;
int blocks_in_grid = 2;
int num_threads = threads_per_block * blocks_in_grid;
// Timer initialization
hipEvent_t start, stop;
CUDA_SAFE_CALL(hipEventCreate(&start));
CUDA_SAFE_CALL(hipEventCreate(&stop));
int results_size = num_threads;
// Allocating output data on CPU
float* data_out_cpu = (float*) malloc(results_size * sizeof(float));
// Allocating output data on GPU
float* data_out_gpu;
float* data_out;
hipMalloc((void**)&data_out_gpu, results_size * sizeof (float));
hipMalloc((void**)&data_out, results_size * sizeof (float));
// Start timer
CUDA_SAFE_CALL(hipEventRecord(start, 0));
// Execute kernel
hipLaunchKernelGGL(( summation_kernel), dim3(blocks_in_grid),dim3(threads_per_block), 0, 0, data_size, data_out_gpu);
// Stop timer
CUDA_SAFE_CALL(hipEventRecord(stop, 0));
CUDA_SAFE_CALL(hipEventSynchronize(stop));
// Get results back
int smemSize = threads_per_block * sizeof(float);
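// keep reducing the per-block partial sums on the GPU, shrinking the grid each pass; the final sum is then read from element 0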
while(blocks_in_grid > 1) {
hipLaunchKernelGGL(( reduce), dim3(blocks_in_grid), dim3(threads_per_block), smemSize, 0, data_size, data_out_gpu, data_out);
data_out_gpu = data_out;
threads_per_block = blocks_in_grid;
if(blocks_in_grid >= threads_per_block) {
blocks_in_grid /= threads_per_block;
} else {
blocks_in_grid /= blocks_in_grid;
}
}
// Finish reduction
float sum = 0.;
hipMemcpy(data_out_cpu, data_out_gpu, results_size * sizeof (float), hipMemcpyDeviceToHost);
sum = data_out_cpu[0];
/* Code before part 2 of the lab (TP)
* for (int i = 0 ; i<results_size; i++){
* sum += data_out_cpu[i];
* printf("%d\n",i);
*}
*/
// Cleanup
hipFree(data_out_gpu);
free(data_out_cpu);
printf("GPU results:\n");
printf(" Sum: %f\n", sum);
float elapsedTime;
CUDA_SAFE_CALL(hipEventElapsedTime(&elapsedTime, start, stop)); // In ms
double total_time = elapsedTime / 1000.; // s
double time_per_iter = total_time / (double)data_size;
double bandwidth = sizeof(float) / time_per_iter; // B/s
printf(" Total time: %g s,\n Per iteration: %g ns\n Throughput: %g GB/s\n",
total_time,
time_per_iter * 1.e9,
bandwidth / 1.e9);
CUDA_SAFE_CALL(hipEventDestroy(start));
CUDA_SAFE_CALL(hipEventDestroy(stop));
return 0;
}
| fdc74bc1821a2fe7207e97507ae73fcd01110a68.cu | #include "utils.h"
#include <stdlib.h>
#include "summation_kernel.cu"
// CPU implementation
float log2_series(int n) {
float res = 0;
for(int i = 0; i < n; i++) {
if(i% 2 == 0) {
res += 1.f/(i+1);
} else {
res -= 1.0f/(i+1);
}
}
return res;
}
int main(int argc, char ** argv) {
int data_size = 1024 * 1024 * 128;
// Run CPU version
double start_time = getclock();
float log2 = log2_series(data_size);
double end_time = getclock();
printf("CPU result: %f\n", log2);
printf(" log(2)=%f\n", log(2.0));
printf(" time=%fs\n", end_time - start_time);
// Parameter definition
int threads_per_block = 64;
int blocks_in_grid = 2;
int num_threads = threads_per_block * blocks_in_grid;
// Timer initialization
cudaEvent_t start, stop;
CUDA_SAFE_CALL(cudaEventCreate(&start));
CUDA_SAFE_CALL(cudaEventCreate(&stop));
int results_size = num_threads;
// Allocating output data on CPU
float* data_out_cpu = (float*) malloc(results_size * sizeof(float));
// Allocating output data on GPU
float* data_out_gpu;
float* data_out;
cudaMalloc((void**)&data_out_gpu, results_size * sizeof (float));
cudaMalloc((void**)&data_out, results_size * sizeof (float));
// Start timer
CUDA_SAFE_CALL(cudaEventRecord(start, 0));
// Execute kernel
summation_kernel<<<blocks_in_grid,threads_per_block>>>(data_size, data_out_gpu);
// Stop timer
CUDA_SAFE_CALL(cudaEventRecord(stop, 0));
CUDA_SAFE_CALL(cudaEventSynchronize(stop));
// Get results back
int smemSize = threads_per_block * sizeof(float);
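// keep reducing the per-block partial sums on the GPU, shrinking the grid each pass; the final sum is then read from element 0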
while(blocks_in_grid > 1) {
reduce<<<blocks_in_grid, threads_per_block, smemSize>>>(data_size, data_out_gpu, data_out);
data_out_gpu = data_out;
threads_per_block = blocks_in_grid;
if(blocks_in_grid >= threads_per_block) {
blocks_in_grid /= threads_per_block;
} else {
blocks_in_grid /= blocks_in_grid;
}
}
// Finish reduction
float sum = 0.;
cudaMemcpy(data_out_cpu, data_out_gpu, results_size * sizeof (float), cudaMemcpyDeviceToHost);
sum = data_out_cpu[0];
/* Code before part 2 of the lab (TP)
* for (int i = 0 ; i<results_size; i++){
* sum += data_out_cpu[i];
* printf("%d\n",i);
*}
*/
// Cleanup
cudaFree(data_out_gpu);
free(data_out_cpu);
printf("GPU results:\n");
printf(" Sum: %f\n", sum);
float elapsedTime;
CUDA_SAFE_CALL(cudaEventElapsedTime(&elapsedTime, start, stop)); // In ms
double total_time = elapsedTime / 1000.; // s
double time_per_iter = total_time / (double)data_size;
double bandwidth = sizeof(float) / time_per_iter; // B/s
printf(" Total time: %g s,\n Per iteration: %g ns\n Throughput: %g GB/s\n",
total_time,
time_per_iter * 1.e9,
bandwidth / 1.e9);
CUDA_SAFE_CALL(cudaEventDestroy(start));
CUDA_SAFE_CALL(cudaEventDestroy(stop));
return 0;
}
|
a89173c0a15cc40340fae9a84e2ff76ddcd72beb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//note: please do not modify this file manually!
// this file has been generated automatically by BOAST version 0.99996
// by: make boast_kernels
/*
!=====================================================================
!
! S p e c f e m 3 D G l o b e V e r s i o n 7 . 0
! --------------------------------------------------
!
! Main historical authors: Dimitri Komatitsch and Jeroen Tromp
! Princeton University, USA
! and CNRS / University of Marseille, France
! (there are currently many more authors!)
! (c) Princeton University and CNRS / University of Marseille, April 2014
!
! This program is free software; you can redistribute it and/or modify
! it under the terms of the GNU General Public License as published by
! the Free Software Foundation; either version 2 of the License, or
! (at your option) any later version.
!
! This program is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along
! with this program; if not, write to the Free Software Foundation, Inc.,
! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
!
!=====================================================================
*/
#ifndef INDEX2
#define INDEX2(isize,i,j) i + isize*j
#endif
#ifndef INDEX3
#define INDEX3(isize,jsize,i,j,k) i + isize*(j + jsize*k)
#endif
#ifndef INDEX4
#define INDEX4(isize,jsize,ksize,i,j,k,x) i + isize*(j + jsize*(k + ksize*x))
#endif
#ifndef INDEX5
#define INDEX5(isize,jsize,ksize,xsize,i,j,k,x,y) i + isize*(j + jsize*(k + ksize*(x + xsize*y)))
#endif
#ifndef NDIM
#define NDIM 3
#endif
#ifndef NGLLX
#define NGLLX 5
#endif
#ifndef NGLL2
#define NGLL2 25
#endif
#ifndef NGLL3
#define NGLL3 125
#endif
#ifndef NGLL3_PADDED
#define NGLL3_PADDED 128
#endif
#ifndef N_SLS
#define N_SLS 3
#endif
#ifndef IREGION_CRUST_MANTLE
#define IREGION_CRUST_MANTLE 1
#endif
#ifndef IREGION_INNER_CORE
#define IREGION_INNER_CORE 3
#endif
#ifndef IFLAG_IN_FICTITIOUS_CUBE
#define IFLAG_IN_FICTITIOUS_CUBE 11
#endif
#ifndef R_EARTH_KM
#define R_EARTH_KM 6371.0f
#endif
#ifndef COLORING_MIN_NSPEC_INNER_CORE
#define COLORING_MIN_NSPEC_INNER_CORE 1000
#endif
#ifndef COLORING_MIN_NSPEC_OUTER_CORE
#define COLORING_MIN_NSPEC_OUTER_CORE 1000
#endif
#ifndef BLOCKSIZE_TRANSFER
#define BLOCKSIZE_TRANSFER 256
#endif
__global__ void compute_add_sources_kernel(float * accel, const int * ibool, const float * sourcearrays, const double * stf_pre_compute, const int myrank, const int * islice_selected_source, const int * ispec_selected_source, const int NSOURCES){
int ispec;
int iglob;
float stf;
int isource;
int i;
int j;
int k;
i = threadIdx.x;
j = threadIdx.y;
k = threadIdx.z;
isource = blockIdx.x + (gridDim.x) * (blockIdx.y);
if (isource < NSOURCES) {
if (myrank == islice_selected_source[isource]) {
ispec = ispec_selected_source[isource] - (1);
stf = stf_pre_compute[isource];
iglob = ibool[INDEX4(NGLLX, NGLLX, NGLLX, i, j, k, ispec)] - (1);
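// accumulate the source array weighted by the source time function into the three acceleration components; atomicAdd is used because a global point can be shared by several elements/sources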
atomicAdd(accel + (iglob) * (3) + 0, (sourcearrays[INDEX5(NDIM, NGLLX, NGLLX, NGLLX, 0, i, j, k, isource)]) * (stf));
atomicAdd(accel + (iglob) * (3) + 1, (sourcearrays[INDEX5(NDIM, NGLLX, NGLLX, NGLLX, 1, i, j, k, isource)]) * (stf));
atomicAdd(accel + (iglob) * (3) + 2, (sourcearrays[INDEX5(NDIM, NGLLX, NGLLX, NGLLX, 2, i, j, k, isource)]) * (stf));
}
}
}
| a89173c0a15cc40340fae9a84e2ff76ddcd72beb.cu | //note: please do not modify this file manually!
// this file has been generated automatically by BOAST version 0.99996
// by: make boast_kernels
/*
!=====================================================================
!
! S p e c f e m 3 D G l o b e V e r s i o n 7 . 0
! --------------------------------------------------
!
! Main historical authors: Dimitri Komatitsch and Jeroen Tromp
! Princeton University, USA
! and CNRS / University of Marseille, France
! (there are currently many more authors!)
! (c) Princeton University and CNRS / University of Marseille, April 2014
!
! This program is free software; you can redistribute it and/or modify
! it under the terms of the GNU General Public License as published by
! the Free Software Foundation; either version 2 of the License, or
! (at your option) any later version.
!
! This program is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along
! with this program; if not, write to the Free Software Foundation, Inc.,
! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
!
!=====================================================================
*/
#ifndef INDEX2
#define INDEX2(isize,i,j) i + isize*j
#endif
#ifndef INDEX3
#define INDEX3(isize,jsize,i,j,k) i + isize*(j + jsize*k)
#endif
#ifndef INDEX4
#define INDEX4(isize,jsize,ksize,i,j,k,x) i + isize*(j + jsize*(k + ksize*x))
#endif
#ifndef INDEX5
#define INDEX5(isize,jsize,ksize,xsize,i,j,k,x,y) i + isize*(j + jsize*(k + ksize*(x + xsize*y)))
#endif
#ifndef NDIM
#define NDIM 3
#endif
#ifndef NGLLX
#define NGLLX 5
#endif
#ifndef NGLL2
#define NGLL2 25
#endif
#ifndef NGLL3
#define NGLL3 125
#endif
#ifndef NGLL3_PADDED
#define NGLL3_PADDED 128
#endif
#ifndef N_SLS
#define N_SLS 3
#endif
#ifndef IREGION_CRUST_MANTLE
#define IREGION_CRUST_MANTLE 1
#endif
#ifndef IREGION_INNER_CORE
#define IREGION_INNER_CORE 3
#endif
#ifndef IFLAG_IN_FICTITIOUS_CUBE
#define IFLAG_IN_FICTITIOUS_CUBE 11
#endif
#ifndef R_EARTH_KM
#define R_EARTH_KM 6371.0f
#endif
#ifndef COLORING_MIN_NSPEC_INNER_CORE
#define COLORING_MIN_NSPEC_INNER_CORE 1000
#endif
#ifndef COLORING_MIN_NSPEC_OUTER_CORE
#define COLORING_MIN_NSPEC_OUTER_CORE 1000
#endif
#ifndef BLOCKSIZE_TRANSFER
#define BLOCKSIZE_TRANSFER 256
#endif
__global__ void compute_add_sources_kernel(float * accel, const int * ibool, const float * sourcearrays, const double * stf_pre_compute, const int myrank, const int * islice_selected_source, const int * ispec_selected_source, const int NSOURCES){
int ispec;
int iglob;
float stf;
int isource;
int i;
int j;
int k;
i = threadIdx.x;
j = threadIdx.y;
k = threadIdx.z;
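  // each thread block handles one source; the 2D grid is flattened into a linear source index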
isource = blockIdx.x + (gridDim.x) * (blockIdx.y);
if (isource < NSOURCES) {
if (myrank == islice_selected_source[isource]) {
ispec = ispec_selected_source[isource] - (1);
stf = stf_pre_compute[isource];
iglob = ibool[INDEX4(NGLLX, NGLLX, NGLLX, i, j, k, ispec)] - (1);
atomicAdd(accel + (iglob) * (3) + 0, (sourcearrays[INDEX5(NDIM, NGLLX, NGLLX, NGLLX, 0, i, j, k, isource)]) * (stf));
atomicAdd(accel + (iglob) * (3) + 1, (sourcearrays[INDEX5(NDIM, NGLLX, NGLLX, NGLLX, 1, i, j, k, isource)]) * (stf));
atomicAdd(accel + (iglob) * (3) + 2, (sourcearrays[INDEX5(NDIM, NGLLX, NGLLX, NGLLX, 2, i, j, k, isource)]) * (stf));
}
}
}
|
d1da191a8fef2150d4fbdba5c2a7befb1c5d95c6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
using namespace std;
__global__ void add(int a, int b, int *c)// kernel function, runs on the GPU
{
*c = a + b;
}
int main()
{
int c;
int *dev_c;
	hipMalloc((void**)&dev_c, sizeof(int));// allocate GPU memory: the first argument points to the address of the newly allocated memory, the second is the allocation size
	hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, 2, 7, dev_c);// launch the kernel with 1 thread block of 1 thread (the original <<<1,1>>> configuration)
	hipMemcpy(&c, dev_c, sizeof(int), hipMemcpyDeviceToHost);// copy the data from the GPU back to the host,
	// i.e. copy sizeof(int) bytes from the region pointed to by dev_c into the region pointed to by &c
cout << "2 + 7 = " << c << endl;
	hipFree(dev_c);// free the memory allocated by cudaMalloc
return 0;
} | d1da191a8fef2150d4fbdba5c2a7befb1c5d95c6.cu | #include<iostream>
using namespace std;
__global__ void add(int a, int b, int *c)//kernel函数,在gpu上运行。
{
*c = a + b;
}
int main()
{
int c;
int *dev_c;
	cudaMalloc((void**)&dev_c, sizeof(int));// allocate GPU memory: the first argument points to the address of the newly allocated memory, the second is the allocation size.
	add<<<1,1>>>(2, 7, dev_c);// launch the kernel: <<<1,1>>> means the GPU starts 1 thread block with 1 thread per block.
	cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost);// copy the data from the GPU back to the host,
	// i.e. copy sizeof(int) bytes from the region pointed to by dev_c into the region pointed to by &c.
cout << "2 + 7 = " << c << endl;
	cudaFree(dev_c);// free the memory allocated by cudaMalloc.
return 0;
} |
a2b396e859b64aa399eeb2a060fafb98159e1e68.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel_hip.cuh"
#define THREADS_PER_BLOCK 1024
__global__ void Fadd_kernel(const dtype* x, const dtype* y, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = x[index] + y[index];
}
}
void Fadd_impl(const dtype* x, const dtype* y, dtype* r, int size) {
hipLaunchKernelGGL(( Fadd_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, size);
//hipDeviceSynchronize();
}
__global__ void Fadd_inplace_kernel(dtype* x, const dtype* y, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
x[index] += y[index];
}
}
void Fadd_inplace_impl(dtype * x, const dtype *y, int size) {
hipLaunchKernelGGL(( Fadd_inplace_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, size);
//hipDeviceSynchronize();
}
__global__ void Fadd_kernel(const dtype* x, dtype** y, dtype* r, int count, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
dtype sum = x[index];
int offset = 0;
for(int idx = 0; idx < count; idx++) {
int global = index + offset;
int idx = global / size;
int idy = global % size;
sum += y[idx][idy];
offset += size;
}
r[index] = sum;
}
}
void Fadd_impl(const dtype* x, dtype** y, dtype* r, int count, int size) {
hipLaunchKernelGGL(( Fadd_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, count, size);
//hipDeviceSynchronize();
}
__global__ void Fadd_kernel(dtype** x, dtype** y, dtype** r, int dim0, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
int idx = index / dim0;
int idy = index % dim0;
r[idx][idy] = x[idx][idy] + y[idx][idy];
}
}
void Fadd_impl(dtype** x, dtype** y, dtype** r, int dim0, int size) {
hipLaunchKernelGGL(( Fadd_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, dim0, size);
//hipDeviceSynchronize();
}
__global__ void Fadd_inplace_kernel(dtype** x, dtype** y, int dim0, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
int idx = index / dim0;
int idy = index % dim0;
x[idx][idy] += y[idx][idy];
}
}
void Fadd_inplace_impl(dtype** x, dtype** y, int dim0, int size) {
hipLaunchKernelGGL(( Fadd_inplace_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, dim0, size);
//hipDeviceSynchronize();
}
__global__ void Fadd_inplace_kernel(dtype* x, dtype** y, int count, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
dtype sum = x[index];
int offset = 0;
for(int idx = 0; idx < count; idx++) {
int global = index + offset;
int idx = global / size;
int idy = global % size;
sum += y[idx][idy];
offset += size;
}
x[index] = sum;
}
}
void Fadd_inplace_impl(dtype* x, dtype** y, int count, int size) {
hipLaunchKernelGGL(( Fadd_inplace_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, count, size);
//hipDeviceSynchronize();
}
__global__ void Fsubtract_kernel(const dtype* x, const dtype* y, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = x[index] - y[index];
}
}
void Fsubtract_impl(const dtype* x, const dtype* y, dtype* r, int size) {
hipLaunchKernelGGL(( Fsubtract_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, size);
//hipDeviceSynchronize();
}
__global__ void Fsubtract_inplace_kernel(dtype* x, const dtype* y, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
x[index] -= y[index];
}
}
void Fsubtract_inplace_impl(dtype* x, const dtype* y, int size) {
hipLaunchKernelGGL(( Fsubtract_inplace_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, size);
//hipDeviceSynchronize();
}
__global__ void Fmultiply_kernel(const dtype* x, const dtype* y, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = x[index] * y[index];
}
}
void Fmultiply_impl(const dtype* x, const dtype* y, dtype* r, int size) {
hipLaunchKernelGGL(( Fmultiply_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, size);
//hipDeviceSynchronize();
}
__global__ void Fmultiply_inplace_kernel(dtype* x, const dtype* y, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
x[index] *= y[index];
}
}
void Fmultiply_inplace_impl(dtype* x, const dtype* y, int size) {
hipLaunchKernelGGL(( Fmultiply_inplace_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, size);
//hipDeviceSynchronize();
}
__global__ void Fmultiply_kernel(dtype** x, dtype** y, dtype** r, int dim0, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
int idx = index / dim0;
int idy = index % dim0;
r[idx][idy] = x[idx][idy] * y[idx][idy];
}
}
void Fmultiply_impl(dtype** x, dtype** y, dtype** r, int dim0, int size) {
hipLaunchKernelGGL(( Fmultiply_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, dim0, size);
//hipDeviceSynchronize();
}
__global__ void Fmultiply_inplace_kernel(dtype** x, dtype** y, int dim0, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
int idx = index / dim0;
int idy = index % dim0;
x[idx][idy] *= y[idx][idy];
}
}
void Fmultiply_inplace_impl(dtype** x, dtype** y, int dim0, int size) {
hipLaunchKernelGGL(( Fmultiply_inplace_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, dim0, size);
//hipDeviceSynchronize();
}
__global__ void Fdivide_kernel(const dtype* x, const dtype* y, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = x[index] / y[index];
}
}
void Fdivide_impl(const dtype* x, const dtype* y, dtype* r, int size) {
hipLaunchKernelGGL(( Fdivide_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, size);
//hipDeviceSynchronize();
}
__global__ void Fmultiply_scalar_kernel(const dtype* x, const dtype y, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = x[index] * y;
}
}
void Fmultiply_scalar_impl(const dtype* x, const dtype y, dtype* r, int size) {
hipLaunchKernelGGL(( Fmultiply_scalar_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, size);
//hipDeviceSynchronize();
}
__global__ void Fmultiply_scalar_kernel(const dtype* x, const dtype* scalar, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = x[index] * scalar[0];
}
}
void Fmultiply_scalar_impl(const dtype* x, const dtype* scalar, dtype* r, int size) {
hipLaunchKernelGGL(( Fmultiply_scalar_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, scalar, r, size);
//hipDeviceSynchronize();
}
__global__ void Fmultiply_scalar_inplace_kernel(dtype* x, const dtype y, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
x[index] *= y;
}
}
void Fmultiply_scalar_inplace_impl(dtype* x, const dtype y, int size) {
hipLaunchKernelGGL(( Fmultiply_scalar_inplace_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, size);
//hipDeviceSynchronize();
}
__global__ void Fadd_scalar_kernel(const dtype* x, const dtype y, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = x[index] + y;
}
}
void Fadd_scalar_impl(const dtype* x, const dtype y, dtype* r, int size) {
hipLaunchKernelGGL(( Fadd_scalar_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, size);
//hipDeviceSynchronize();
}
__global__ void Fadd_scalar_inplace_kernel(dtype* x, const dtype y, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
x[index] += y;
}
}
void Fadd_scalar_inplace_impl(dtype* x, const dtype y, int size) {
hipLaunchKernelGGL(( Fadd_scalar_inplace_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, size);
//hipDeviceSynchronize();
}
__global__ void Fsquare_kernel(const dtype* x, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = x[index] * x[index];
}
}
void Fsquare_impl(const dtype* x, dtype* r, int size) {
hipLaunchKernelGGL(( Fsquare_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, size);
//hipDeviceSynchronize();
}
__global__ void Ftanh_kernel(const dtype* x, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = tanh(x[index]);
}
}
void Ftanh_impl(const dtype* x, dtype* r, int size) {
hipLaunchKernelGGL(( Ftanh_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, size);
//hipDeviceSynchronize();
}
__global__ void Ftanh_kernel(dtype** x, dtype** r, int dim0, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
int idx = index / dim0;
int idy = index % dim0;
r[idx][idy] = tanh(x[idx][idy]);
}
}
void Ftanh_impl(dtype** x, dtype** r, int dim0, int size) {
hipLaunchKernelGGL(( Ftanh_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, dim0, size);
//hipDeviceSynchronize();
}
__global__ void Fsigmoid_kernel(dtype** x, dtype** r, int dim0, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
int idx = index / dim0;
int idy = index % dim0;
r[idx][idy] = 1.0 / (1.0 + exp(-x[idx][idy]));
}
}
void Fsigmoid_impl(dtype** x, dtype** r, int dim0, int size) {
hipLaunchKernelGGL(( Fsigmoid_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, dim0, size);
//hipDeviceSynchronize();
}
__global__ void Dsigmoid_kernel(dtype** x, dtype** y, dtype** r, int dim0, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
int idx = index / dim0;
int idy = index % dim0;
r[idx][idy] = (1 - y[idx][idy]) * y[idx][idy];
}
}
void Dsigmoid_impl(dtype** x, dtype** y, dtype** r, int dim0, int size){
hipLaunchKernelGGL(( Dsigmoid_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, dim0, size);
//hipDeviceSynchronize();
}
__global__ void Dtanh_kernel(const dtype* x, const dtype* y, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = (1 + y[index]) * (1 - y[index]);
}
}
void Dtanh_impl(const dtype* x, const dtype* y, dtype* r, int size) {
hipLaunchKernelGGL(( Dtanh_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, size);
//hipDeviceSynchronize();
}
__global__ void Dtanh_kernel(dtype** x, dtype** y, dtype** r, int dim0, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
int idx = index / dim0;
int idy = index % dim0;
r[idx][idy] = (1 + y[idx][idy]) * (1 - y[idx][idy]);
}
}
void Dtanh_impl(dtype** x, dtype** y, dtype** r, int dim0, int size){
hipLaunchKernelGGL(( Dtanh_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, dim0, size);
//hipDeviceSynchronize();
}
__global__ void Fsigmoid_kernel(const dtype* x, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = 1.0 / (1.0 + exp(-x[index]));
}
}
void Fsigmoid_impl(const dtype* x, dtype* r, int size) {
hipLaunchKernelGGL(( Fsigmoid_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, size);
//hipDeviceSynchronize();
}
__global__ void Dsigmoid_kernel(const dtype* x, const dtype* y, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = (1 - y[index]) * y[index];
}
}
void Dsigmoid_impl(const dtype* x, const dtype* y, dtype* r, int size) {
hipLaunchKernelGGL(( Dsigmoid_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, size);
//hipDeviceSynchronize();
}
__global__ void Fsqrt_kernel(const dtype* x, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = sqrt(x[index]);
}
}
void Fsqrt_impl(const dtype* x, dtype* r, int size) {
hipLaunchKernelGGL(( Fsqrt_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, size);
//hipDeviceSynchronize();
}
__global__ void concat_kernel(const dtype *src, dtype* dst, int offset, int dim) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < dim) {
dst[offset + index] = src[index];
}
}
void concat_impl(const dtype *src, dtype* dst, int offset, int dim) {
hipLaunchKernelGGL(( concat_kernel), dim3((dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, src, dst, offset, dim);
//hipDeviceSynchronize();
}
__global__ void concat_kernel(dtype **src, dtype* dst, int src_dim, int dst_dim) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < dst_dim) {
int idx = index / src_dim;
int idy = index % src_dim;
dst[index] = src[idx][idy];
}
}
void concat_impl(dtype **src, dtype* dst, int src_dim, int dst_dim) {
hipLaunchKernelGGL(( concat_kernel), dim3((dst_dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, src, dst, src_dim, dst_dim);
//hipDeviceSynchronize();
}
__global__ void concat_kernel(dtype ***src, int count, int n, int src_dim, int src_size, dtype **dst) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < src_size) {
int s_idx = index / (n * src_dim);
int s_idy = (index - s_idx * n * src_dim) / src_dim;
int s_idz = (index - s_idx * s_idy * src_dim) % (src_dim);
//printf("id:%d:, idx:%d, idy:%d, idz:%d, val:%lf\n", index, s_idx, s_idy, s_idz, src[s_idx][s_idy][s_idz]);
int d_idx = s_idx;
int d_idy = index % (n * src_dim);
//printf("id:%d:\n", index);
dst[d_idx][d_idy] = src[s_idx][s_idy][s_idz];
}
}
void concat_impl(dtype ***src, int count, int n, int src_dim, int src_size, dtype **dst) {
hipLaunchKernelGGL(( concat_kernel), dim3((src_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, src, count, n, src_dim, src_size, dst);
//hipDeviceSynchronize();
}
__global__ void unconcat_kernel(dtype **src, int count, int n, int src_dim, int src_size, dtype ***dst) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < src_size) {
int s_idx = index / (n * src_dim);
int s_idy = (index - s_idx * n * src_dim) / src_dim;
int s_idz = (index - s_idx * s_idy * src_dim) % (src_dim);
//printf("id:%d:, idx:%d, idy:%d, idz:%d, val:%lf\n", index, s_idx, s_idy, s_idz, src[s_idx][s_idy][s_idz]);
int d_idx = s_idx;
int d_idy = index % (n * src_dim);
//printf("id:%d:\n", index);
dst[s_idx][s_idy][s_idz] = src[d_idx][d_idy];
}
}
void unconcat_impl(dtype **src, int count, int n, int src_dim, int src_size, dtype ***dst) {
hipLaunchKernelGGL(( unconcat_kernel), dim3((src_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, src, count, n, src_dim, src_size, dst);
}
__global__ void unconcat_kernel(const dtype *src, dtype* dst, int offset, int dim) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < dim) {
dst[index] = src[offset + index];
}
}
void unconcat_impl(const dtype *src, dtype* dst, int offset, int dim) {
hipLaunchKernelGGL(( unconcat_kernel), dim3((dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, src, dst, offset, dim);
//hipDeviceSynchronize();
}
__global__ void unconcat_kernel(const dtype *src, dtype **dst, int src_dim, int dst_dim) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < src_dim) {
int idx = index / dst_dim;
int idy = index % dst_dim;
dst[idx][idy] = src[index];
}
}
void unconcat_impl(const dtype *src, dtype** dst, int src_dim, int dst_dim) {
hipLaunchKernelGGL(( unconcat_kernel), dim3((src_dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, src, dst, src_dim, dst_dim);
//hipDeviceSynchronize();
}
__global__ void Fconcat_kernel(dtype ***src, int count, int n, int *offset_ptr, int dst_dim, int dst_size, dtype **dst) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < dst_size) {
int dst_idx = index / dst_dim;
int dst_idy = index % dst_dim;
int src_idx = dst_idx;
int src_idy = -1, src_idz = -1;
for(int i = 0; i < n; i++) {
if (dst_idy - offset_ptr[i] < 0) {
src_idy = i;
src_idz = dst_idy - offset_ptr[src_idy - 1];
break;
}
}
//printf("%d, %d, %d, %d\n", index, src_idx, src_idy, src_idz);
dst[dst_idx][dst_idy] = src[src_idx][src_idy][src_idz];
//printf("%d\n", offset_ptr[index]);
}
}
void Fconcat_impl(dtype ***src, int count, int n, int *offset_ptr, int dst_dim, dtype **dst) {
int dst_size = count * dst_dim;
hipLaunchKernelGGL(( Fconcat_kernel), dim3((dst_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, src, count, n, offset_ptr, dst_dim, dst_size, dst);
}
__global__ void Dconcat_kernel(dtype **loss, int count, int n, int *offset_ptr, int loss_dim, int loss_size, dtype ***in_loss) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < loss_size) {
int dst_idx = index / loss_dim;
int dst_idy = index % loss_dim;
int src_idx = dst_idx;
int src_idy = -1, src_idz = -1;
for(int i = 0; i < n; i++) {
if (dst_idy - offset_ptr[i] < 0) {
src_idy = i;
src_idz = dst_idy - offset_ptr[src_idy - 1];
break;
}
}
atomicAdd(in_loss[src_idx][src_idy] + src_idz, loss[dst_idx][dst_idy]);
//in_loss[src_idx][src_idy][src_idz] += loss[dst_idx][dst_idy];
}
}
void Dconcat_impl(dtype **loss, int count, int n, int *offset_ptr, int loss_dim, dtype ***in_loss) {
int loss_size = count * loss_dim;
hipLaunchKernelGGL(( Dconcat_kernel), dim3((loss_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, loss, count, n, offset_ptr, loss_dim, loss_size, in_loss);
}
__global__ void Ftranspose_kernel(const dtype* x, dtype* r, int dim0, int dim1, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = x[index % dim0 * dim1 + index / dim0];
}
}
void Ftranspose_impl(const dtype* x, dtype* r, int dim0, int dim1, int size) {
hipLaunchKernelGGL(( Ftranspose_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, dim0, dim1, size);
//hipDeviceSynchronize();
}
__global__ void set_kernel(dtype **x, int* dims, int n, int max_dim, dtype val) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int idx = index / max_dim;
int idy = index % max_dim;
if (idx < n && idy < dims[idx]) {
x[idx][idy] = val;
}
}
void set_impl(dtype **x, int* dims, int n, int max_dim, dtype val) {
int size = n * max_dim;
hipLaunchKernelGGL(( set_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, dims, n, max_dim, val);
}
__global__ void set_col_kernel(dtype* x, int dim0, int col, int size, dtype val) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int i = index + col * dim0;
if (i < size && index < dim0) {
x[i] = val;
}
}
void set_col_impl(dtype* x, int dim0, int col, int size, dtype val) {
hipLaunchKernelGGL(( set_col_kernel), dim3((dim0 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, dim0, col, size, val);
//hipDeviceSynchronize();
}
__global__ void set_cols_kernel(dtype* x, int dim0, int* cols, int col_num, dtype* val, int val_size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < val_size) {
int col_num = cols[index / dim0];
int offset = index % dim0;
x[col_num * dim0 + offset] = val[index];
}
}
void set_cols_impl(dtype* x, int dim0, int* cols, int col_num, dtype* val) {
int val_size = col_num * dim0;
hipLaunchKernelGGL(( set_cols_kernel), dim3((val_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, dim0, cols, col_num, val, val_size);
//hipDeviceSynchronize();
}
__global__ void FLookup_kernel(const dtype* x, dtype** r, int xdim0, int xdim1, int r_size, int* cols, int col_num) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < r_size) {
int col_index = index / xdim0;
if(col_index < col_num) {
int col = cols[col_index];
int offset = index % xdim0;
int x_index = col * xdim0 + offset;
if(x_index < xdim0 * xdim1) {
r[col_index][offset] = x[x_index];
}
}
}
}
void FLookup_impl(const dtype* x, dtype** r, int xdim0, int xdim1, int r_size, int* cols, int col_num) {
hipLaunchKernelGGL(( FLookup_kernel), dim3((r_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0,
x, r, xdim0, xdim1, r_size, cols, col_num);
//hipDeviceSynchronize();
}
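// gradient scatter for the lookup table: atomicAdd is needed because the same column id can appear more than once in cols,
// so several threads may accumulate into the same row of gx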
__global__ void DLookup_kernel(dtype* gx, dtype** loss, int gxdim0, int gxdim1, int l_size, int* cols, int col_num) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < l_size) {
int col_index = index / gxdim0;
if(col_index < col_num) {
int col = cols[col_index];
int offset = index % gxdim0;
int gx_index = col * gxdim0 + offset;
if(gx_index < gxdim0 * gxdim1) {
atomicAdd(gx + gx_index, loss[col_index][offset]);
//gx[gx_index] += loss[col_index][offset];
}
}
}
}
void DLookup_impl(dtype* gx, dtype** loss, int gxdim0, int gxdim1, int l_size, int* cols, int col_num) {
hipLaunchKernelGGL(( DLookup_kernel), dim3((l_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0,
gx, loss, gxdim0, gxdim1, l_size, cols, col_num);
//hipDeviceSynchronize();
}
__global__ void get_cols_kernel(const dtype* x, dtype* r, int xdim0, int xdim1, int r_size, int* cols, int col_num) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < r_size) {
int col_index = index / xdim0;
if(col_index < col_num) {
int col = cols[col_index];
int offset = index % xdim0;
int x_index = col * xdim0 + offset;
if(x_index < xdim0 * xdim1) {
r[index] = x[x_index];
}
}
}
}
void get_cols_impl(const dtype* x, dtype* r, int xdim0, int xdim1, int r_size, int* cols, int col_num) {
hipLaunchKernelGGL(( get_cols_kernel), dim3((r_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0,
x, r, xdim0, xdim1, r_size, cols, col_num);
//hipDeviceSynchronize();
}
__global__ void get_col_kernel(const dtype* x, dtype* r, int dim0, int col, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int i = index + col * dim0;
if (i < size && index < dim0) {
r[index] = x[i];
}
}
void get_col_impl(const dtype* x, dtype* r, int dim0, int col, int size) {
hipLaunchKernelGGL(( get_col_kernel), dim3((dim0 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, dim0, col, size);
//hipDeviceSynchronize();
}
__global__ void Fadd_col_kernel(dtype* x, const dtype* y, int col, int dim0, int size){
int index = threadIdx.x + blockIdx.x * blockDim.x;
int i = index + col * dim0;
if (i < size && index < dim0) {
x[i] = x[i] + y[index];
}
}
void Fadd_col_impl(dtype* x, const dtype* y, int col, int dim0, int size) {
hipLaunchKernelGGL(( Fadd_col_kernel), dim3((dim0 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, col, dim0, size);
//hipDeviceSynchronize();
}
template<int BLOCK_SIZE>
__global__ void Favgpooling_kernel(
dtype **px, int skip, int n, dtype *py) {
__shared__ dtype temp[BLOCK_SIZE];
const int bid = blockIdx.x;
const int tid = threadIdx.x;
//px += bid % skip + (bid / skip) * skip * n;
int index_start = bid % skip + (bid / skip) * skip * n;
temp[tid] = 0;
for (int i = tid; i < n; i += BLOCK_SIZE) {
int global = index_start + i * skip;
int idx = global / skip;
int idy = global % skip;
temp[tid] += px[idx][idy];
}
::__syncthreads();
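  // tree reduction in shared memory: each step folds the upper half of temp[] into the lower half, halving the active threads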
#define REDUCE(k) \
if (BLOCK_SIZE >= k << 1) { \
if (tid < k) temp[tid] += temp[tid + k]; \
::__syncthreads(); \
}
REDUCE(512)
REDUCE(256)
REDUCE(128)
REDUCE(64)
REDUCE(32)
REDUCE(16)
REDUCE(8)
REDUCE(4)
REDUCE(2)
REDUCE(1)
#undef REDUCE
if (tid == 0) py[bid] = temp[0] / n;
}
void Favgpooling_impl(dtype** x, dtype* y, int n, int r, int s) {
int block_size = THREADS_PER_BLOCK;
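  // choose the smallest power-of-two block size that is still >= n (capped at THREADS_PER_BLOCK); each thread strides over the n inputs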
while (block_size >> 1 >= n) block_size >>= 1;
switch (block_size) {
#define CASE(k) \
case k:hipLaunchKernelGGL(( ::Favgpooling_kernel<k>), dim3(r), dim3(k), 0, 0, x, s, n, y); break
CASE(1024);
CASE(512);
CASE(256);
CASE(128);
CASE(64);
CASE(32);
CASE(16);
CASE(8);
CASE(4);
CASE(2);
CASE(1);
#undef CASE
}
//hipDeviceSynchronize();
}
__global__ void Davgpooling_kernel(const dtype* gy, int gy_size, int gx_size, int n, dtype** gx) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if(i < gx_size) {
int idx = i / gy_size;
int idy = i % gy_size;
atomicAdd(gx[idx] + idy, gy[idy] / n);
//gx[idx][idy] += (gy[idy] / n);
}
}
void Davgpooling_impl(const dtype* gy, int gy_size, int gx_size, int n, dtype** gx) {
hipLaunchKernelGGL(( Davgpooling_kernel), dim3((gx_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, gy, gy_size, gx_size, n, gx);
//hipDeviceSynchronize();
}
template<int BLOCK_SIZE>
__global__ void Fsumpooling_kernel(
dtype **px, int skip, int n, dtype *py) {
__shared__ dtype temp[BLOCK_SIZE];
const int bid = blockIdx.x;
const int tid = threadIdx.x;
//px += bid % skip + (bid / skip) * skip * n;
int index_start = bid % skip + (bid / skip) * skip * n;
temp[tid] = 0;
for (int i = tid; i < n; i += BLOCK_SIZE) {
int global = index_start + i * skip;
int idx = global / skip;
int idy = global % skip;
dtype val = px[idx][idy];
temp[tid] += val;
}
::__syncthreads();
#define REDUCE(k) \
if (BLOCK_SIZE >= k << 1) { \
if (tid < k) temp[tid] += temp[tid + k]; \
::__syncthreads(); \
}
REDUCE(512)
REDUCE(256)
REDUCE(128)
REDUCE(64)
REDUCE(32)
REDUCE(16)
REDUCE(8)
REDUCE(4)
REDUCE(2)
REDUCE(1)
#undef REDUCE
if (tid == 0) py[bid] = temp[0];
}
void Fsumpooling_impl(dtype** x, dtype* y, int n, int r, int s) {
int block_size = THREADS_PER_BLOCK;
while (block_size >> 1 >= n) block_size >>= 1;
switch (block_size) {
#define CASE(k) \
case k:hipLaunchKernelGGL(( ::Fsumpooling_kernel<k>), dim3(r), dim3(k), 0, 0, x, s, n, y); break
CASE(1024);
CASE(512);
CASE(256);
CASE(128);
CASE(64);
CASE(32);
CASE(16);
CASE(8);
CASE(4);
CASE(2);
CASE(1);
#undef CASE
}
//hipDeviceSynchronize();
}
__global__ void Dsumpooling_kernel(const dtype* gy, int gy_size, int gx_size, dtype** gx) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if(i < gx_size) {
int idx = i / gy_size;
int idy = i % gy_size;
atomicAdd(gx[idx] + idy, gy[idy]);
//gx[idx][idy] += gy[idy];
}
}
void Dsumpooling_impl(const dtype* gy, int gy_size, int gx_size, dtype** gx) {
hipLaunchKernelGGL(( Dsumpooling_kernel), dim3((gx_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, gy, gy_size, gx_size, gx);
//hipDeviceSynchronize();
}
template<int BLOCK_SIZE>
__global__ void Fmaxpooling_kernel(
dtype **px, int skip, int n, dtype *py, int* index) {
__shared__ dtype temp[BLOCK_SIZE];
__shared__ int temp_index[BLOCK_SIZE];
const int bid = blockIdx.x;
const int tid = threadIdx.x;
//px += bid % skip + (bid / skip) * skip * n;
dtype thread_max = NEGATIVE_INFINITY;
int index_start = bid % skip + (bid / skip) * skip * n;
int index_max;
for (int i = tid; i < n; i += BLOCK_SIZE) {
int global = index_start + i * skip;
int idx = global / skip;
int idy = global % skip;
dtype val = px[idx][idy];
if(val > thread_max) {
thread_max = val;
index_max = index_start + i * skip;
}
}
temp[tid] = thread_max;
temp_index[tid] = index_max;
::__syncthreads();
#define REDUCE(k) \
if (BLOCK_SIZE >= k << 1) { \
if (tid < k) if(temp[tid + k] > temp[tid]) {temp[tid] = temp[tid + k]; temp_index[tid] = temp_index[tid + k];} \
::__syncthreads(); \
}
REDUCE(512)
REDUCE(256)
REDUCE(128)
REDUCE(64)
REDUCE(32)
REDUCE(16)
REDUCE(8)
REDUCE(4)
REDUCE(2)
REDUCE(1)
#undef REDUCE
if (tid == 0) {py[bid] = temp[0]; index[bid] = temp_index[0];}
}
void Fmaxpooling_impl(dtype** x, dtype* y, int n, int r, int s, int* index){
int block_size = THREADS_PER_BLOCK;
while (block_size >> 1 >= n) block_size >>= 1;
switch (block_size) {
#define CASE(k) \
case k:hipLaunchKernelGGL(( ::Fmaxpooling_kernel<k>), dim3(r), dim3(k), 0, 0, x, s, n, y, index); break
CASE(1024);
CASE(512);
CASE(256);
CASE(128);
CASE(64);
CASE(32);
CASE(16);
CASE(8);
CASE(4);
CASE(2);
CASE(1);
#undef CASE
}
//hipDeviceSynchronize();
}
__global__ void Dmaxpooling_kernel(const dtype* gy, dtype** gx, int* index, int dim) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if(i < dim) {
int idx = index[i] / dim;
int idy = index[i] % dim;
atomicAdd(gx[idx] + idy, gy[i]);
//gx[idx][idy] += gy[i];
}
}
void Dmaxpooling_impl(const dtype* gy, dtype** gx, int* index, int dim) {
hipLaunchKernelGGL(( Dmaxpooling_kernel), dim3((dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, gy, gx, index, dim);
//hipDeviceSynchronize();
}
template<int BLOCK_SIZE>
__global__ void Fminpooling_kernel(
dtype **px, int skip, int n, dtype *py, int* index) {
__shared__ dtype temp[BLOCK_SIZE];
__shared__ int temp_index[BLOCK_SIZE];
const int bid = blockIdx.x;
const int tid = threadIdx.x;
//px += bid % skip + (bid / skip) * skip * n;
dtype thread_min = POSITIVE_INFINITY;
int index_start = bid % skip + (bid / skip) * skip * n;
int index_min;
for (int i = tid; i < n; i += BLOCK_SIZE) {
int global = index_start + i * skip;
int idx = global / skip;
int idy = global % skip;
dtype val = px[idx][idy];
if(val < thread_min) {
thread_min = val;
index_min = index_start + i * skip;
}
}
temp[tid] = thread_min;
temp_index[tid] = index_min;
::__syncthreads();
#define REDUCE(k) \
if (BLOCK_SIZE >= k << 1) { \
if (tid < k) if(temp[tid + k] < temp[tid]) {temp[tid] = temp[tid + k]; temp_index[tid] = temp_index[tid + k];} \
::__syncthreads(); \
}
REDUCE(512)
REDUCE(256)
REDUCE(128)
REDUCE(64)
REDUCE(32)
REDUCE(16)
REDUCE(8)
REDUCE(4)
REDUCE(2)
REDUCE(1)
#undef REDUCE
if (tid == 0) {py[bid] = temp[0]; index[bid] = temp_index[0];}
}
void Fminpooling_impl(dtype** x, dtype* y, int n, int r, int s, int* index) {
int block_size = THREADS_PER_BLOCK;
while (block_size >> 1 >= n) block_size >>= 1;
switch (block_size) {
#define CASE(k) \
case k:hipLaunchKernelGGL(( ::Fminpooling_kernel<k>), dim3(r), dim3(k), 0, 0, x, s, n, y, index); break
CASE(1024);
CASE(512);
CASE(256);
CASE(128);
CASE(64);
CASE(32);
CASE(16);
CASE(8);
CASE(4);
CASE(2);
CASE(1);
#undef CASE
}
//hipDeviceSynchronize();
}
__global__ void Dminpooling_kernel(const dtype* gy, dtype** gx, int* index, int dim) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if(i < dim) {
int idx = index[i] / dim;
int idy = index[i] % dim;
atomicAdd(gx[idx] + idy, gy[i]);
//gx[idx][idy] += gy[i];
}
}
void Dminpooling_impl(const dtype* gy, dtype** gx, int* index, int dim) {
hipLaunchKernelGGL(( Dminpooling_kernel), dim3((dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, gy, gx, index, dim);
//hipDeviceSynchronize();
}
| a2b396e859b64aa399eeb2a060fafb98159e1e68.cu | #include "kernel.cuh"
#define THREADS_PER_BLOCK 1024
__global__ void Fadd_kernel(const dtype* x, const dtype* y, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = x[index] + y[index];
}
}
void Fadd_impl(const dtype* x, const dtype* y, dtype* r, int size) {
Fadd_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, size);
//cudaDeviceSynchronize();
}
__global__ void Fadd_inplace_kernel(dtype* x, const dtype* y, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
x[index] += y[index];
}
}
void Fadd_inplace_impl(dtype * x, const dtype *y, int size) {
Fadd_inplace_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, size);
//cudaDeviceSynchronize();
}
__global__ void Fadd_kernel(const dtype* x, dtype** y, dtype* r, int count, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
dtype sum = x[index];
int offset = 0;
for(int idx = 0; idx < count; idx++) {
int global = index + offset;
int idx = global / size;
int idy = global % size;
sum += y[idx][idy];
offset += size;
}
r[index] = sum;
}
}
void Fadd_impl(const dtype* x, dtype** y, dtype* r, int count, int size) {
Fadd_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, count, size);
//cudaDeviceSynchronize();
}
__global__ void Fadd_kernel(dtype** x, dtype** y, dtype** r, int dim0, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
int idx = index / dim0;
int idy = index % dim0;
r[idx][idy] = x[idx][idy] + y[idx][idy];
}
}
void Fadd_impl(dtype** x, dtype** y, dtype** r, int dim0, int size) {
Fadd_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, dim0, size);
//cudaDeviceSynchronize();
}
__global__ void Fadd_inplace_kernel(dtype** x, dtype** y, int dim0, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
int idx = index / dim0;
int idy = index % dim0;
x[idx][idy] += y[idx][idy];
}
}
void Fadd_inplace_impl(dtype** x, dtype** y, int dim0, int size) {
Fadd_inplace_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, dim0, size);
//cudaDeviceSynchronize();
}
__global__ void Fadd_inplace_kernel(dtype* x, dtype** y, int count, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
dtype sum = x[index];
int offset = 0;
for(int idx = 0; idx < count; idx++) {
int global = index + offset;
int idx = global / size;
int idy = global % size;
sum += y[idx][idy];
offset += size;
}
x[index] = sum;
}
}
void Fadd_inplace_impl(dtype* x, dtype** y, int count, int size) {
Fadd_inplace_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, count, size);
//cudaDeviceSynchronize();
}
__global__ void Fsubtract_kernel(const dtype* x, const dtype* y, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = x[index] - y[index];
}
}
void Fsubtract_impl(const dtype* x, const dtype* y, dtype* r, int size) {
Fsubtract_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, size);
//cudaDeviceSynchronize();
}
__global__ void Fsubtract_inplace_kernel(dtype* x, const dtype* y, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
x[index] -= y[index];
}
}
void Fsubtract_inplace_impl(dtype* x, const dtype* y, int size) {
Fsubtract_inplace_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, size);
//cudaDeviceSynchronize();
}
__global__ void Fmultiply_kernel(const dtype* x, const dtype* y, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = x[index] * y[index];
}
}
void Fmultiply_impl(const dtype* x, const dtype* y, dtype* r, int size) {
Fmultiply_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, size);
//cudaDeviceSynchronize();
}
__global__ void Fmultiply_inplace_kernel(dtype* x, const dtype* y, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
x[index] *= y[index];
}
}
void Fmultiply_inplace_impl(dtype* x, const dtype* y, int size) {
Fmultiply_inplace_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, size);
//cudaDeviceSynchronize();
}
__global__ void Fmultiply_kernel(dtype** x, dtype** y, dtype** r, int dim0, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
int idx = index / dim0;
int idy = index % dim0;
r[idx][idy] = x[idx][idy] * y[idx][idy];
}
}
void Fmultiply_impl(dtype** x, dtype** y, dtype** r, int dim0, int size) {
Fmultiply_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, dim0, size);
//cudaDeviceSynchronize();
}
__global__ void Fmultiply_inplace_kernel(dtype** x, dtype** y, int dim0, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
int idx = index / dim0;
int idy = index % dim0;
x[idx][idy] *= y[idx][idy];
}
}
void Fmultiply_inplace_impl(dtype** x, dtype** y, int dim0, int size) {
Fmultiply_inplace_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, dim0, size);
//cudaDeviceSynchronize();
}
__global__ void Fdivide_kernel(const dtype* x, const dtype* y, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = x[index] / y[index];
}
}
void Fdivide_impl(const dtype* x, const dtype* y, dtype* r, int size) {
Fdivide_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, size);
//cudaDeviceSynchronize();
}
__global__ void Fmultiply_scalar_kernel(const dtype* x, const dtype y, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = x[index] * y;
}
}
void Fmultiply_scalar_impl(const dtype* x, const dtype y, dtype* r, int size) {
Fmultiply_scalar_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, size);
//cudaDeviceSynchronize();
}
__global__ void Fmultiply_scalar_kernel(const dtype* x, const dtype* scalar, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = x[index] * scalar[0];
}
}
void Fmultiply_scalar_impl(const dtype* x, const dtype* scalar, dtype* r, int size) {
Fmultiply_scalar_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, scalar, r, size);
//cudaDeviceSynchronize();
}
__global__ void Fmultiply_scalar_inplace_kernel(dtype* x, const dtype y, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
x[index] *= y;
}
}
void Fmultiply_scalar_inplace_impl(dtype* x, const dtype y, int size) {
Fmultiply_scalar_inplace_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, size);
//cudaDeviceSynchronize();
}
__global__ void Fadd_scalar_kernel(const dtype* x, const dtype y, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = x[index] + y;
}
}
void Fadd_scalar_impl(const dtype* x, const dtype y, dtype* r, int size) {
Fadd_scalar_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, size);
//cudaDeviceSynchronize();
}
__global__ void Fadd_scalar_inplace_kernel(dtype* x, const dtype y, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
x[index] += y;
}
}
void Fadd_scalar_inplace_impl(dtype* x, const dtype y, int size) {
Fadd_scalar_inplace_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, size);
//cudaDeviceSynchronize();
}
__global__ void Fsquare_kernel(const dtype* x, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = x[index] * x[index];
}
}
void Fsquare_impl(const dtype* x, dtype* r, int size) {
Fsquare_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, r, size);
//cudaDeviceSynchronize();
}
__global__ void Ftanh_kernel(const dtype* x, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = tanh(x[index]);
}
}
void Ftanh_impl(const dtype* x, dtype* r, int size) {
Ftanh_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, r, size);
//cudaDeviceSynchronize();
}
__global__ void Ftanh_kernel(dtype** x, dtype** r, int dim0, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
int idx = index / dim0;
int idy = index % dim0;
r[idx][idy] = tanh(x[idx][idy]);
}
}
void Ftanh_impl(dtype** x, dtype** r, int dim0, int size) {
Ftanh_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, r, dim0, size);
//cudaDeviceSynchronize();
}
__global__ void Fsigmoid_kernel(dtype** x, dtype** r, int dim0, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
int idx = index / dim0;
int idy = index % dim0;
r[idx][idy] = 1.0 / (1.0 + exp(-x[idx][idy]));
}
}
void Fsigmoid_impl(dtype** x, dtype** r, int dim0, int size) {
Fsigmoid_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, r, dim0, size);
//cudaDeviceSynchronize();
}
__global__ void Dsigmoid_kernel(dtype** x, dtype** y, dtype** r, int dim0, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
int idx = index / dim0;
int idy = index % dim0;
r[idx][idy] = (1 - y[idx][idy]) * y[idx][idy];
}
}
void Dsigmoid_impl(dtype** x, dtype** y, dtype** r, int dim0, int size){
Dsigmoid_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, dim0, size);
//cudaDeviceSynchronize();
}
__global__ void Dtanh_kernel(const dtype* x, const dtype* y, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = (1 + y[index]) * (1 - y[index]);
}
}
void Dtanh_impl(const dtype* x, const dtype* y, dtype* r, int size) {
Dtanh_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, size);
//cudaDeviceSynchronize();
}
__global__ void Dtanh_kernel(dtype** x, dtype** y, dtype** r, int dim0, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
int idx = index / dim0;
int idy = index % dim0;
r[idx][idy] = (1 + y[idx][idy]) * (1 - y[idx][idy]);
}
}
void Dtanh_impl(dtype** x, dtype** y, dtype** r, int dim0, int size){
Dtanh_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, dim0, size);
//cudaDeviceSynchronize();
}
__global__ void Fsigmoid_kernel(const dtype* x, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = 1.0 / (1.0 + exp(-x[index]));
}
}
void Fsigmoid_impl(const dtype* x, dtype* r, int size) {
Fsigmoid_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, r, size);
//cudaDeviceSynchronize();
}
__global__ void Dsigmoid_kernel(const dtype* x, const dtype* y, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = (1 - y[index]) * y[index];
}
}
void Dsigmoid_impl(const dtype* x, const dtype* y, dtype* r, int size) {
Dsigmoid_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, size);
//cudaDeviceSynchronize();
}
__global__ void Fsqrt_kernel(const dtype* x, dtype* r, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = sqrt(x[index]);
}
}
void Fsqrt_impl(const dtype* x, dtype* r, int size) {
Fsqrt_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, r, size);
//cudaDeviceSynchronize();
}
__global__ void concat_kernel(const dtype *src, dtype* dst, int offset, int dim) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < dim) {
dst[offset + index] = src[index];
}
}
void concat_impl(const dtype *src, dtype* dst, int offset, int dim) {
concat_kernel<<<(dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(src, dst, offset, dim);
//cudaDeviceSynchronize();
}
__global__ void concat_kernel(dtype **src, dtype* dst, int src_dim, int dst_dim) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < dst_dim) {
int idx = index / src_dim;
int idy = index % src_dim;
dst[index] = src[idx][idy];
}
}
void concat_impl(dtype **src, dtype* dst, int src_dim, int dst_dim) {
concat_kernel<<<(dst_dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(src, dst, src_dim, dst_dim);
//cudaDeviceSynchronize();
}
__global__ void concat_kernel(dtype ***src, int count, int n, int src_dim, int src_size, dtype **dst) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < src_size) {
int s_idx = index / (n * src_dim);
int s_idy = (index - s_idx * n * src_dim) / src_dim;
int s_idz = (index - s_idx * s_idy * src_dim) % (src_dim);
//printf("id:%d:, idx:%d, idy:%d, idz:%d, val:%lf\n", index, s_idx, s_idy, s_idz, src[s_idx][s_idy][s_idz]);
int d_idx = s_idx;
int d_idy = index % (n * src_dim);
//printf("id:%d:\n", index);
dst[d_idx][d_idy] = src[s_idx][s_idy][s_idz];
}
}
void concat_impl(dtype ***src, int count, int n, int src_dim, int src_size, dtype **dst) {
concat_kernel<<<(src_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(src, count, n, src_dim, src_size, dst);
//cudaDeviceSynchronize();
}
__global__ void unconcat_kernel(dtype **src, int count, int n, int src_dim, int src_size, dtype ***dst) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < src_size) {
int s_idx = index / (n * src_dim);
int s_idy = (index - s_idx * n * src_dim) / src_dim;
int s_idz = (index - s_idx * s_idy * src_dim) % (src_dim);
//printf("id:%d:, idx:%d, idy:%d, idz:%d, val:%lf\n", index, s_idx, s_idy, s_idz, src[s_idx][s_idy][s_idz]);
int d_idx = s_idx;
int d_idy = index % (n * src_dim);
//printf("id:%d:\n", index);
dst[s_idx][s_idy][s_idz] = src[d_idx][d_idy];
}
}
void unconcat_impl(dtype **src, int count, int n, int src_dim, int src_size, dtype ***dst) {
unconcat_kernel<<<(src_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(src, count, n, src_dim, src_size, dst);
}
__global__ void unconcat_kernel(const dtype *src, dtype* dst, int offset, int dim) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < dim) {
dst[index] = src[offset + index];
}
}
void unconcat_impl(const dtype *src, dtype* dst, int offset, int dim) {
unconcat_kernel<<<(dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(src, dst, offset, dim);
//cudaDeviceSynchronize();
}
__global__ void unconcat_kernel(const dtype *src, dtype **dst, int src_dim, int dst_dim) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < src_dim) {
int idx = index / dst_dim;
int idy = index % dst_dim;
dst[idx][idy] = src[index];
}
}
void unconcat_impl(const dtype *src, dtype** dst, int src_dim, int dst_dim) {
unconcat_kernel<<<(src_dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(src, dst, src_dim, dst_dim);
//cudaDeviceSynchronize();
}
__global__ void Fconcat_kernel(dtype ***src, int count, int n, int *offset_ptr, int dst_dim, int dst_size, dtype **dst) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < dst_size) {
int dst_idx = index / dst_dim;
int dst_idy = index % dst_dim;
int src_idx = dst_idx;
int src_idy = -1, src_idz = -1;
for(int i = 0; i < n; i++) {
if (dst_idy - offset_ptr[i] < 0) {
src_idy = i;
src_idz = dst_idy - offset_ptr[src_idy - 1];
break;
}
}
//printf("%d, %d, %d, %d\n", index, src_idx, src_idy, src_idz);
dst[dst_idx][dst_idy] = src[src_idx][src_idy][src_idz];
//printf("%d\n", offset_ptr[index]);
}
}
void Fconcat_impl(dtype ***src, int count, int n, int *offset_ptr, int dst_dim, dtype **dst) {
int dst_size = count * dst_dim;
Fconcat_kernel<<<(dst_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(src, count, n, offset_ptr, dst_dim, dst_size, dst);
}
__global__ void Dconcat_kernel(dtype **loss, int count, int n, int *offset_ptr, int loss_dim, int loss_size, dtype ***in_loss) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < loss_size) {
int dst_idx = index / loss_dim;
int dst_idy = index % loss_dim;
int src_idx = dst_idx;
int src_idy = -1, src_idz = -1;
for(int i = 0; i < n; i++) {
if (dst_idy - offset_ptr[i] < 0) {
src_idy = i;
src_idz = dst_idy - offset_ptr[src_idy - 1];
break;
}
}
atomicAdd(in_loss[src_idx][src_idy] + src_idz, loss[dst_idx][dst_idy]);
//in_loss[src_idx][src_idy][src_idz] += loss[dst_idx][dst_idy];
}
}
void Dconcat_impl(dtype **loss, int count, int n, int *offset_ptr, int loss_dim, dtype ***in_loss) {
int loss_size = count * loss_dim;
Dconcat_kernel<<<(loss_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(loss, count, n, offset_ptr, loss_dim, loss_size, in_loss);
}
__global__ void Ftranspose_kernel(const dtype* x, dtype* r, int dim0, int dim1, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size) {
r[index] = x[index % dim0 * dim1 + index / dim0];
}
}
void Ftranspose_impl(const dtype* x, dtype* r, int dim0, int dim1, int size) {
Ftranspose_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, r, dim0, dim1, size);
//cudaDeviceSynchronize();
}
__global__ void set_kernel(dtype **x, int* dims, int n, int max_dim, dtype val) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int idx = index / max_dim;
int idy = index % max_dim;
if (idx < n && idy < dims[idx]) {
x[idx][idy] = val;
}
}
void set_impl(dtype **x, int* dims, int n, int max_dim, dtype val) {
int size = n * max_dim;
set_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, dims, n, max_dim, val);
}
__global__ void set_col_kernel(dtype* x, int dim0, int col, int size, dtype val) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int i = index + col * dim0;
if (i < size && index < dim0) {
x[i] = val;
}
}
void set_col_impl(dtype* x, int dim0, int col, int size, dtype val) {
set_col_kernel<<<(dim0 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, dim0, col, size, val);
//cudaDeviceSynchronize();
}
__global__ void set_cols_kernel(dtype* x, int dim0, int* cols, int col_num, dtype* val, int val_size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < val_size) {
int col_num = cols[index / dim0];
int offset = index % dim0;
x[col_num * dim0 + offset] = val[index];
}
}
void set_cols_impl(dtype* x, int dim0, int* cols, int col_num, dtype* val) {
int val_size = col_num * dim0;
set_cols_kernel<<<(val_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, dim0, cols, col_num, val, val_size);
//cudaDeviceSynchronize();
}
__global__ void FLookup_kernel(const dtype* x, dtype** r, int xdim0, int xdim1, int r_size, int* cols, int col_num) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < r_size) {
int col_index = index / xdim0;
if(col_index < col_num) {
int col = cols[col_index];
int offset = index % xdim0;
int x_index = col * xdim0 + offset;
if(x_index < xdim0 * xdim1) {
r[col_index][offset] = x[x_index];
}
}
}
}
void FLookup_impl(const dtype* x, dtype** r, int xdim0, int xdim1, int r_size, int* cols, int col_num) {
FLookup_kernel<<<(r_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>
(x, r, xdim0, xdim1, r_size, cols, col_num);
//cudaDeviceSynchronize();
}
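// gradient scatter for the lookup table: atomicAdd is needed because the same column id can appear more than once in cols,
// so several threads may accumulate into the same row of gx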
__global__ void DLookup_kernel(dtype* gx, dtype** loss, int gxdim0, int gxdim1, int l_size, int* cols, int col_num) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < l_size) {
int col_index = index / gxdim0;
if(col_index < col_num) {
int col = cols[col_index];
int offset = index % gxdim0;
int gx_index = col * gxdim0 + offset;
if(gx_index < gxdim0 * gxdim1) {
atomicAdd(gx + gx_index, loss[col_index][offset]);
//gx[gx_index] += loss[col_index][offset];
}
}
}
}
void DLookup_impl(dtype* gx, dtype** loss, int gxdim0, int gxdim1, int l_size, int* cols, int col_num) {
DLookup_kernel<<<(l_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>
(gx, loss, gxdim0, gxdim1, l_size, cols, col_num);
//cudaDeviceSynchronize();
}
__global__ void get_cols_kernel(const dtype* x, dtype* r, int xdim0, int xdim1, int r_size, int* cols, int col_num) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < r_size) {
int col_index = index / xdim0;
if(col_index < col_num) {
int col = cols[col_index];
int offset = index % xdim0;
int x_index = col * xdim0 + offset;
if(x_index < xdim0 * xdim1) {
r[index] = x[x_index];
}
}
}
}
void get_cols_impl(const dtype* x, dtype* r, int xdim0, int xdim1, int r_size, int* cols, int col_num) {
get_cols_kernel<<<(r_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>
(x, r, xdim0, xdim1, r_size, cols, col_num);
//cudaDeviceSynchronize();
}
__global__ void get_col_kernel(const dtype* x, dtype* r, int dim0, int col, int size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int i = index + col * dim0;
if (i < size && index < dim0) {
r[index] = x[i];
}
}
void get_col_impl(const dtype* x, dtype* r, int dim0, int col, int size) {
get_col_kernel<<<(dim0 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, r, dim0, col, size);
//cudaDeviceSynchronize();
}
__global__ void Fadd_col_kernel(dtype* x, const dtype* y, int col, int dim0, int size){
int index = threadIdx.x + blockIdx.x * blockDim.x;
int i = index + col * dim0;
if (i < size && index < dim0) {
x[i] = x[i] + y[index];
}
}
void Fadd_col_impl(dtype* x, const dtype* y, int col, int dim0, int size) {
Fadd_col_kernel<<<(dim0 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, col, dim0, size);
//cudaDeviceSynchronize();
}
template<int BLOCK_SIZE>
__global__ void Favgpooling_kernel(
dtype **px, int skip, int n, dtype *py) {
__shared__ dtype temp[BLOCK_SIZE];
const int bid = blockIdx.x;
const int tid = threadIdx.x;
//px += bid % skip + (bid / skip) * skip * n;
int index_start = bid % skip + (bid / skip) * skip * n;
temp[tid] = 0;
for (int i = tid; i < n; i += BLOCK_SIZE) {
int global = index_start + i * skip;
int idx = global / skip;
int idy = global % skip;
temp[tid] += px[idx][idy];
}
::__syncthreads();
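  // tree reduction in shared memory: each step folds the upper half of temp[] into the lower half, halving the active threads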
#define REDUCE(k) \
if (BLOCK_SIZE >= k << 1) { \
if (tid < k) temp[tid] += temp[tid + k]; \
::__syncthreads(); \
}
REDUCE(512)
REDUCE(256)
REDUCE(128)
REDUCE(64)
REDUCE(32)
REDUCE(16)
REDUCE(8)
REDUCE(4)
REDUCE(2)
REDUCE(1)
#undef REDUCE
if (tid == 0) py[bid] = temp[0] / n;
}
void Favgpooling_impl(dtype** x, dtype* y, int n, int r, int s) {
int block_size = THREADS_PER_BLOCK;
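  // choose the smallest power-of-two block size that is still >= n (capped at THREADS_PER_BLOCK); each thread strides over the n inputs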
while (block_size >> 1 >= n) block_size >>= 1;
switch (block_size) {
#define CASE(k) \
case k: ::Favgpooling_kernel<k><<<r, k>>>(x, s, n, y); break
CASE(1024);
CASE(512);
CASE(256);
CASE(128);
CASE(64);
CASE(32);
CASE(16);
CASE(8);
CASE(4);
CASE(2);
CASE(1);
#undef CASE
}
//cudaDeviceSynchronize();
}
__global__ void Davgpooling_kernel(const dtype* gy, int gy_size, int gx_size, int n, dtype** gx) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if(i < gx_size) {
int idx = i / gy_size;
int idy = i % gy_size;
atomicAdd(gx[idx] + idy, gy[idy] / n);
//gx[idx][idy] += (gy[idy] / n);
}
}
void Davgpooling_impl(const dtype* gy, int gy_size, int gx_size, int n, dtype** gx) {
Davgpooling_kernel<<<(gx_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(gy, gy_size, gx_size, n, gx);
//cudaDeviceSynchronize();
}
template<int BLOCK_SIZE>
__global__ void Fsumpooling_kernel(
dtype **px, int skip, int n, dtype *py) {
__shared__ dtype temp[BLOCK_SIZE];
const int bid = blockIdx.x;
const int tid = threadIdx.x;
//px += bid % skip + (bid / skip) * skip * n;
int index_start = bid % skip + (bid / skip) * skip * n;
temp[tid] = 0;
for (int i = tid; i < n; i += BLOCK_SIZE) {
int global = index_start + i * skip;
int idx = global / skip;
int idy = global % skip;
dtype val = px[idx][idy];
temp[tid] += val;
}
::__syncthreads();
#define REDUCE(k) \
if (BLOCK_SIZE >= k << 1) { \
if (tid < k) temp[tid] += temp[tid + k]; \
::__syncthreads(); \
}
REDUCE(512)
REDUCE(256)
REDUCE(128)
REDUCE(64)
REDUCE(32)
REDUCE(16)
REDUCE(8)
REDUCE(4)
REDUCE(2)
REDUCE(1)
#undef REDUCE
if (tid == 0) py[bid] = temp[0];
}
void Fsumpooling_impl(dtype** x, dtype* y, int n, int r, int s) {
int block_size = THREADS_PER_BLOCK;
while (block_size >> 1 >= n) block_size >>= 1;
switch (block_size) {
#define CASE(k) \
case k: ::Fsumpooling_kernel<k><<<r, k>>>(x, s, n, y); break
CASE(1024);
CASE(512);
CASE(256);
CASE(128);
CASE(64);
CASE(32);
CASE(16);
CASE(8);
CASE(4);
CASE(2);
CASE(1);
#undef CASE
}
//cudaDeviceSynchronize();
}
__global__ void Dsumpooling_kernel(const dtype* gy, int gy_size, int gx_size, dtype** gx) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if(i < gx_size) {
int idx = i / gy_size;
int idy = i % gy_size;
atomicAdd(gx[idx] + idy, gy[idy]);
//gx[idx][idy] += gy[idy];
}
}
void Dsumpooling_impl(const dtype* gy, int gy_size, int gx_size, dtype** gx) {
Dsumpooling_kernel<<<(gx_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(gy, gy_size, gx_size, gx);
//cudaDeviceSynchronize();
}
template<int BLOCK_SIZE>
__global__ void Fmaxpooling_kernel(
dtype **px, int skip, int n, dtype *py, int* index) {
__shared__ dtype temp[BLOCK_SIZE];
__shared__ int temp_index[BLOCK_SIZE];
const int bid = blockIdx.x;
const int tid = threadIdx.x;
//px += bid % skip + (bid / skip) * skip * n;
dtype thread_max = NEGATIVE_INFINITY;
int index_start = bid % skip + (bid / skip) * skip * n;
int index_max = -1; // sentinel; overwritten whenever thread_max is updated below
for (int i = tid; i < n; i += BLOCK_SIZE) {
int global = index_start + i * skip;
int idx = global / skip;
int idy = global % skip;
dtype val = px[idx][idy];
if(val > thread_max) {
thread_max = val;
index_max = index_start + i * skip;
}
}
temp[tid] = thread_max;
temp_index[tid] = index_max;
::__syncthreads();
#define REDUCE(k) \
if (BLOCK_SIZE >= k << 1) { \
if (tid < k) if(temp[tid + k] > temp[tid]) {temp[tid] = temp[tid + k]; temp_index[tid] = temp_index[tid + k];} \
::__syncthreads(); \
}
REDUCE(512)
REDUCE(256)
REDUCE(128)
REDUCE(64)
REDUCE(32)
REDUCE(16)
REDUCE(8)
REDUCE(4)
REDUCE(2)
REDUCE(1)
#undef REDUCE
if (tid == 0) {py[bid] = temp[0]; index[bid] = temp_index[0];}
}
void Fmaxpooling_impl(dtype** x, dtype* y, int n, int r, int s, int* index){
int block_size = THREADS_PER_BLOCK;
while (block_size >> 1 >= n) block_size >>= 1;
switch (block_size) {
#define CASE(k) \
case k: ::Fmaxpooling_kernel<k><<<r, k>>>(x, s, n, y, index); break
CASE(1024);
CASE(512);
CASE(256);
CASE(128);
CASE(64);
CASE(32);
CASE(16);
CASE(8);
CASE(4);
CASE(2);
CASE(1);
#undef CASE
}
//cudaDeviceSynchronize();
}
__global__ void Dmaxpooling_kernel(const dtype* gy, dtype** gx, int* index, int dim) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if(i < dim) {
int idx = index[i] / dim;
int idy = index[i] % dim;
atomicAdd(gx[idx] + idy, gy[i]);
//gx[idx][idy] += gy[i];
}
}
void Dmaxpooling_impl(const dtype* gy, dtype** gx, int* index, int dim) {
Dmaxpooling_kernel<<<(dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(gy, gx, index, dim);
//cudaDeviceSynchronize();
}
template<int BLOCK_SIZE>
__global__ void Fminpooling_kernel(
dtype **px, int skip, int n, dtype *py, int* index) {
__shared__ dtype temp[BLOCK_SIZE];
__shared__ int temp_index[BLOCK_SIZE];
const int bid = blockIdx.x;
const int tid = threadIdx.x;
//px += bid % skip + (bid / skip) * skip * n;
dtype thread_min = POSITIVE_INFINITY;
int index_start = bid % skip + (bid / skip) * skip * n;
int index_min = -1; // sentinel; overwritten whenever thread_min is updated below
for (int i = tid; i < n; i += BLOCK_SIZE) {
int global = index_start + i * skip;
int idx = global / skip;
int idy = global % skip;
dtype val = px[idx][idy];
if(val < thread_min) {
thread_min = val;
index_min = index_start + i * skip;
}
}
temp[tid] = thread_min;
temp_index[tid] = index_min;
::__syncthreads();
#define REDUCE(k) \
if (BLOCK_SIZE >= k << 1) { \
if (tid < k) if(temp[tid + k] < temp[tid]) {temp[tid] = temp[tid + k]; temp_index[tid] = temp_index[tid + k];} \
::__syncthreads(); \
}
REDUCE(512)
REDUCE(256)
REDUCE(128)
REDUCE(64)
REDUCE(32)
REDUCE(16)
REDUCE(8)
REDUCE(4)
REDUCE(2)
REDUCE(1)
#undef REDUCE
if (tid == 0) {py[bid] = temp[0]; index[bid] = temp_index[0];}
}
void Fminpooling_impl(dtype** x, dtype* y, int n, int r, int s, int* index) {
int block_size = THREADS_PER_BLOCK;
while (block_size >> 1 >= n) block_size >>= 1;
switch (block_size) {
#define CASE(k) \
case k: ::Fminpooling_kernel<k><<<r, k>>>(x, s, n, y, index); break
CASE(1024);
CASE(512);
CASE(256);
CASE(128);
CASE(64);
CASE(32);
CASE(16);
CASE(8);
CASE(4);
CASE(2);
CASE(1);
#undef CASE
}
//cudaDeviceSynchronize();
}
__global__ void Dminpooling_kernel(const dtype* gy, dtype** gx, int* index, int dim) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if(i < dim) {
int idx = index[i] / dim;
int idy = index[i] % dim;
atomicAdd(gx[idx] + idy, gy[i]);
//gx[idx][idy] += gy[i];
}
}
void Dminpooling_impl(const dtype* gy, dtype** gx, int* index, int dim) {
Dminpooling_kernel<<<(dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(gy, gx, index, dim);
//cudaDeviceSynchronize();
}
|
505957eba7bb362f62cfc4760bef5568c7a41608.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright (c) 2020 by Contributors
* \file array/cuda/coo_sort.cc
* \brief Sort COO index
*/
#include <dgl/array.h>
#include "../../runtime/cuda/cuda_common.h"
#include "../../c_api_common.h"
#include "./utils.h"
namespace dgl {
using runtime::NDArray;
namespace aten {
namespace impl {
///////////////////////////// COOSort_ /////////////////////////////
/**
* @brief Encode row and column IDs into a single scalar per edge.
*
* @tparam IdType The type to encode as.
* @param row The row (src) IDs per edge.
* @param col The column (dst) IDs per edge.
* @param nnz The number of edges.
* @param col_bits The number of bits used to encode the destination. The row
* information is packed into the remaining bits.
* @param key The encoded edges (output).
*/
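// Illustrative example (values hypothetical): with col_bits = 20, row[i] = 3 and
// col[i] = 7 encode to key[i] = (3 << 20) | 7; _COODecodeEdgesKernel below
// recovers row = key >> 20 and col = key & ((1 << 20) - 1).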
template <typename IdType>
__global__ void _COOEncodeEdgesKernel(
const IdType* const row, const IdType* const col,
const int64_t nnz, const int col_bits, IdType * const key) {
int64_t tx = static_cast<int64_t>(blockIdx.x) * blockDim.x + threadIdx.x;
if (tx < nnz) {
key[tx] = row[tx] << col_bits | col[tx];
}
}
/**
* @brief Decode row and column IDs from the encoded edges.
*
* @tparam IdType The type the edges are encoded as.
* @param key The encoded edges.
* @param nnz The number of edges.
* @param col_bits The number of bits used to store the column/dst ID.
* @param row The row (src) IDs per edge (output).
* @param col The col (dst) IDs per edge (output).
*/
template <typename IdType>
__global__ void _COODecodeEdgesKernel(
const IdType* const key, const int64_t nnz, const int col_bits,
IdType * const row, IdType * const col) {
int64_t tx = static_cast<int64_t>(blockIdx.x) * blockDim.x + threadIdx.x;
if (tx < nnz) {
const IdType k = key[tx];
row[tx] = k >> col_bits;
col[tx] = k & ((1 << col_bits) - 1);
}
}
template<typename T>
int _NumberOfBits(const T& range) {
if (range <= 1) {
// ranges of 0 or 1 require no bits to store
return 0;
}
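// e.g. _NumberOfBits(5) == 3, since IDs 0..4 fit in 3 bits; the result is used
// below to size the radix-sort key for the row/column ranges.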
int bits = 1;
while (bits < sizeof(T)*8 && (1 << bits) < range) {
++bits;
}
CHECK_EQ((range-1) >> bits, 0);
CHECK_NE((range-1) >> (bits-1), 0);
return bits;
}
template <DLDeviceType XPU, typename IdType>
void COOSort_(COOMatrix* coo, bool sort_column) {
auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal();
const int row_bits = _NumberOfBits(coo->num_rows);
const int64_t nnz = coo->row->shape[0];
if (sort_column) {
const int col_bits = _NumberOfBits(coo->num_cols);
const int num_bits = row_bits + col_bits;
const int nt = 256;
const int nb = (nnz+nt-1)/nt;
CHECK(static_cast<int64_t>(nb)*nt >= nnz);
IdArray pos = aten::NewIdArray(nnz, coo->row->ctx, coo->row->dtype.bits);
CUDA_KERNEL_CALL(_COOEncodeEdgesKernel, nb, nt, 0, thr_entry->stream,
coo->row.Ptr<IdType>(), coo->col.Ptr<IdType>(),
nnz, col_bits, pos.Ptr<IdType>());
auto sorted = Sort(pos, num_bits);
CUDA_KERNEL_CALL(_COODecodeEdgesKernel, nb, nt, 0, thr_entry->stream,
sorted.first.Ptr<IdType>(), nnz, col_bits,
coo->row.Ptr<IdType>(), coo->col.Ptr<IdType>());
if (aten::COOHasData(*coo))
coo->data = IndexSelect(coo->data, sorted.second);
else
coo->data = AsNumBits(sorted.second, coo->row->dtype.bits);
coo->row_sorted = coo->col_sorted = true;
} else {
const int num_bits = row_bits;
auto sorted = Sort(coo->row, num_bits);
coo->row = sorted.first;
coo->col = IndexSelect(coo->col, sorted.second);
if (aten::COOHasData(*coo))
coo->data = IndexSelect(coo->data, sorted.second);
else
coo->data = AsNumBits(sorted.second, coo->row->dtype.bits);
coo->row_sorted = true;
}
}
template void COOSort_<kDLGPU, int32_t>(COOMatrix* coo, bool sort_column);
template void COOSort_<kDLGPU, int64_t>(COOMatrix* coo, bool sort_column);
///////////////////////////// COOIsSorted /////////////////////////////
template <typename IdType>
__global__ void _COOIsSortedKernel(
const IdType* row, const IdType* col,
int64_t nnz, int8_t* row_sorted, int8_t* col_sorted) {
int tx = blockIdx.x * blockDim.x + threadIdx.x;
const int stride_x = gridDim.x * blockDim.x;
while (tx < nnz) {
if (tx == 0) {
row_sorted[0] = 1;
col_sorted[0] = 1;
} else {
row_sorted[tx] = static_cast<int8_t>(row[tx - 1] <= row[tx]);
col_sorted[tx] = static_cast<int8_t>(
row[tx - 1] < row[tx] || col[tx - 1] <= col[tx]);
}
tx += stride_x;
}
}
template <DLDeviceType XPU, typename IdType>
std::pair<bool, bool> COOIsSorted(COOMatrix coo) {
const int64_t nnz = coo.row->shape[0];
const auto& ctx = coo.row->ctx;
auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal();
auto device = runtime::DeviceAPI::Get(ctx);
// We allocate a workspace of 2*nnz bytes. It wastes a little bit of memory but should
// be fine.
int8_t* row_flags = static_cast<int8_t*>(device->AllocWorkspace(ctx, nnz));
int8_t* col_flags = static_cast<int8_t*>(device->AllocWorkspace(ctx, nnz));
const int nt = cuda::FindNumThreads(nnz);
const int nb = (nnz + nt - 1) / nt;
CUDA_KERNEL_CALL(_COOIsSortedKernel, nb, nt, 0, thr_entry->stream,
coo.row.Ptr<IdType>(), coo.col.Ptr<IdType>(),
nnz, row_flags, col_flags);
const bool row_sorted = cuda::AllTrue(row_flags, nnz, ctx);
const bool col_sorted = row_sorted? cuda::AllTrue(col_flags, nnz, ctx) : false;
device->FreeWorkspace(ctx, row_flags);
device->FreeWorkspace(ctx, col_flags);
return {row_sorted, col_sorted};
}
template std::pair<bool, bool> COOIsSorted<kDLGPU, int32_t>(COOMatrix coo);
template std::pair<bool, bool> COOIsSorted<kDLGPU, int64_t>(COOMatrix coo);
} // namespace impl
} // namespace aten
} // namespace dgl
| 505957eba7bb362f62cfc4760bef5568c7a41608.cu | /*!
* Copyright (c) 2020 by Contributors
* \file array/cuda/coo_sort.cc
* \brief Sort COO index
*/
#include <dgl/array.h>
#include "../../runtime/cuda/cuda_common.h"
#include "../../c_api_common.h"
#include "./utils.h"
namespace dgl {
using runtime::NDArray;
namespace aten {
namespace impl {
///////////////////////////// COOSort_ /////////////////////////////
/**
* @brief Encode row and column IDs into a single scalar per edge.
*
* @tparam IdType The type to encode as.
* @param row The row (src) IDs per edge.
* @param col The column (dst) IDs per edge.
* @param nnz The number of edges.
* @param col_bits The number of bits used to encode the destination. The row
* information is packed into the remaining bits.
* @param key The encoded edges (output).
*/
template <typename IdType>
__global__ void _COOEncodeEdgesKernel(
const IdType* const row, const IdType* const col,
const int64_t nnz, const int col_bits, IdType * const key) {
int64_t tx = static_cast<int64_t>(blockIdx.x) * blockDim.x + threadIdx.x;
if (tx < nnz) {
key[tx] = row[tx] << col_bits | col[tx];
}
}
/**
* @brief Decode row and column IDs from the encoded edges.
*
* @tparam IdType The type the edges are encoded as.
* @param key The encoded edges.
* @param nnz The number of edges.
* @param col_bits The number of bits used to store the column/dst ID.
* @param row The row (src) IDs per edge (output).
* @param col The col (dst) IDs per edge (output).
*/
template <typename IdType>
__global__ void _COODecodeEdgesKernel(
const IdType* const key, const int64_t nnz, const int col_bits,
IdType * const row, IdType * const col) {
int64_t tx = static_cast<int64_t>(blockIdx.x) * blockDim.x + threadIdx.x;
if (tx < nnz) {
const IdType k = key[tx];
row[tx] = k >> col_bits;
col[tx] = k & ((1 << col_bits) - 1);
}
}
template<typename T>
int _NumberOfBits(const T& range) {
if (range <= 1) {
// ranges of 0 or 1 require no bits to store
return 0;
}
int bits = 1;
while (bits < sizeof(T)*8 && (1 << bits) < range) {
++bits;
}
CHECK_EQ((range-1) >> bits, 0);
CHECK_NE((range-1) >> (bits-1), 0);
return bits;
}
template <DLDeviceType XPU, typename IdType>
void COOSort_(COOMatrix* coo, bool sort_column) {
auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal();
const int row_bits = _NumberOfBits(coo->num_rows);
const int64_t nnz = coo->row->shape[0];
if (sort_column) {
const int col_bits = _NumberOfBits(coo->num_cols);
const int num_bits = row_bits + col_bits;
const int nt = 256;
const int nb = (nnz+nt-1)/nt;
CHECK(static_cast<int64_t>(nb)*nt >= nnz);
IdArray pos = aten::NewIdArray(nnz, coo->row->ctx, coo->row->dtype.bits);
CUDA_KERNEL_CALL(_COOEncodeEdgesKernel, nb, nt, 0, thr_entry->stream,
coo->row.Ptr<IdType>(), coo->col.Ptr<IdType>(),
nnz, col_bits, pos.Ptr<IdType>());
auto sorted = Sort(pos, num_bits);
CUDA_KERNEL_CALL(_COODecodeEdgesKernel, nb, nt, 0, thr_entry->stream,
sorted.first.Ptr<IdType>(), nnz, col_bits,
coo->row.Ptr<IdType>(), coo->col.Ptr<IdType>());
if (aten::COOHasData(*coo))
coo->data = IndexSelect(coo->data, sorted.second);
else
coo->data = AsNumBits(sorted.second, coo->row->dtype.bits);
coo->row_sorted = coo->col_sorted = true;
} else {
const int num_bits = row_bits;
auto sorted = Sort(coo->row, num_bits);
coo->row = sorted.first;
coo->col = IndexSelect(coo->col, sorted.second);
if (aten::COOHasData(*coo))
coo->data = IndexSelect(coo->data, sorted.second);
else
coo->data = AsNumBits(sorted.second, coo->row->dtype.bits);
coo->row_sorted = true;
}
}
template void COOSort_<kDLGPU, int32_t>(COOMatrix* coo, bool sort_column);
template void COOSort_<kDLGPU, int64_t>(COOMatrix* coo, bool sort_column);
///////////////////////////// COOIsSorted /////////////////////////////
template <typename IdType>
__global__ void _COOIsSortedKernel(
const IdType* row, const IdType* col,
int64_t nnz, int8_t* row_sorted, int8_t* col_sorted) {
int tx = blockIdx.x * blockDim.x + threadIdx.x;
const int stride_x = gridDim.x * blockDim.x;
while (tx < nnz) {
if (tx == 0) {
row_sorted[0] = 1;
col_sorted[0] = 1;
} else {
row_sorted[tx] = static_cast<int8_t>(row[tx - 1] <= row[tx]);
col_sorted[tx] = static_cast<int8_t>(
row[tx - 1] < row[tx] || col[tx - 1] <= col[tx]);
}
tx += stride_x;
}
}
template <DLDeviceType XPU, typename IdType>
std::pair<bool, bool> COOIsSorted(COOMatrix coo) {
const int64_t nnz = coo.row->shape[0];
const auto& ctx = coo.row->ctx;
auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal();
auto device = runtime::DeviceAPI::Get(ctx);
// We allocate a workspace of 2*nnz bytes. It wastes a little bit memory but should
// be fine.
int8_t* row_flags = static_cast<int8_t*>(device->AllocWorkspace(ctx, nnz));
int8_t* col_flags = static_cast<int8_t*>(device->AllocWorkspace(ctx, nnz));
const int nt = cuda::FindNumThreads(nnz);
const int nb = (nnz + nt - 1) / nt;
CUDA_KERNEL_CALL(_COOIsSortedKernel, nb, nt, 0, thr_entry->stream,
coo.row.Ptr<IdType>(), coo.col.Ptr<IdType>(),
nnz, row_flags, col_flags);
const bool row_sorted = cuda::AllTrue(row_flags, nnz, ctx);
const bool col_sorted = row_sorted? cuda::AllTrue(col_flags, nnz, ctx) : false;
device->FreeWorkspace(ctx, row_flags);
device->FreeWorkspace(ctx, col_flags);
return {row_sorted, col_sorted};
}
template std::pair<bool, bool> COOIsSorted<kDLGPU, int32_t>(COOMatrix coo);
template std::pair<bool, bool> COOIsSorted<kDLGPU, int64_t>(COOMatrix coo);
} // namespace impl
} // namespace aten
} // namespace dgl
|
266100cdd0c2422cd0afa40cda2a757b3ae0b35f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "helper.cuh"
#include "fp16.cuh"
#include <hip/hip_fp16.h>
#define BLOCK_DIM 512
namespace fp16
{
__global__ void float2half_kernel(half *out, float *in)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
out[idx] = __float2half(in[idx]);
}
__global__ void half2float_kernel(float *out, half *in)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
out[idx] = __half2float(in[idx]);
}
void float2half(half *out, float *in, size_t length)
{
hipLaunchKernelGGL(( float2half_kernel), dim3((length + BLOCK_DIM - 1) / BLOCK_DIM), dim3(BLOCK_DIM) , 0, 0, out, in);
}
void half2float(float *out, half *in, size_t length)
{
hipLaunchKernelGGL(( half2float_kernel), dim3((length + BLOCK_DIM - 1) / BLOCK_DIM), dim3(BLOCK_DIM) , 0, 0, out, in);
}
} // namespace fp16 | 266100cdd0c2422cd0afa40cda2a757b3ae0b35f.cu | #include "helper.cuh"
#include "fp16.cuh"
#include <cuda_fp16.h>
#define BLOCK_DIM 512
namespace fp16
{
__global__ void float2half_kernel(half *out, float *in)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
out[idx] = __float2half(in[idx]);
}
__global__ void half2float_kernel(float *out, half *in)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
out[idx] = __half2float(in[idx]);
}
void float2half(half *out, float *in, size_t length)
{
float2half_kernel<<< (length + BLOCK_DIM - 1) / BLOCK_DIM, BLOCK_DIM >>>(out, in);
}
void half2float(float *out, half *in, size_t length)
{
half2float_kernel<<< (length + BLOCK_DIM - 1) / BLOCK_DIM, BLOCK_DIM >>>(out, in);
}
} // namespace fp16 |
c28abe69d2dcd5088280a3c3d05c0e44696fcf44.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* COMPILE: nvcc -std=c++11 clustering_cuda_skeleton.cu clustering_impl.cpp main.cpp -o cuda
* RUN: ./cuda <path> <epsilon> <mu> <num_blocks_per_grid> <num_threads_per_block>
*/
#include <iostream>
#include "clustering.h"
void expansion(int v, int label, int *nbr_offs, int *sim_nbrs,
bool *visited, bool *pivots, int *cluster_result){
for (int j=nbr_offs[v]; j<nbr_offs[v+1]; j++){
if (sim_nbrs[j] != -1){
int nbr_id = sim_nbrs[j];
if((pivots[nbr_id]) && (!visited[nbr_id])){
visited[nbr_id] = true;
cluster_result[nbr_id] = label;
expansion(nbr_id, label, nbr_offs,
sim_nbrs, visited, pivots, cluster_result);
}
} else{
break;
}
}
}
__global__ void parallel(int *nbr_offs_device, int *nbrs_device, float epsilon, int mu,
int num_vs, bool *pivots_device, int *sim_nbrs_device){
//Stage 1
const int num_thread = blockDim.x*gridDim.x;
for(int i = blockDim.x*blockIdx.x + threadIdx.x; i<num_vs; i+=num_thread){
// i: Vertex id
int left_start = nbr_offs_device[i];
int left_end = nbr_offs_device[i+1];
int left_size = left_end - left_start;
int counter = 0;
for (int j = left_start; j < left_end; j++){
int nbr_id = nbrs_device[j];
int right_start = nbr_offs_device[nbr_id];
int right_end = nbr_offs_device[nbr_id + 1];
int right_size = right_end - right_start;
// compute the similarity
int left_pos = left_start, right_pos = right_start, num_com_nbrs = 0;
while (left_pos < left_end && right_pos < right_end) {
if (nbrs_device[left_pos] == nbrs_device[right_pos]) {
num_com_nbrs++;
left_pos++;
right_pos++;
} else if (nbrs_device[left_pos] < nbrs_device[right_pos]) {
left_pos++;
} else {
right_pos++;
}
}
float sim = (num_com_nbrs + 2) / sqrt((left_size + 1.0) * (right_size + 1.0));
if (sim > epsilon) {
sim_nbrs_device[nbr_offs_device[i] + counter] = nbr_id;
counter++;
}
}
if(counter > mu) pivots_device[i] = true;
}
}
void cuda_scan(int num_vs, int num_es, int *nbr_offs, int *nbrs,
float epsilon, int mu, int num_blocks_per_grid, int num_threads_per_block,
int &num_clusters, int *cluster_result) {
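// Stage 1 (GPU): for every vertex, count neighbours whose structural similarity
// exceeds epsilon and mark the vertex as a pivot when that count exceeds mu.
// Stage 2 (host): grow clusters from unvisited pivots by recursive expansion
// over the similar-neighbour lists.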
// Stage 1
bool *pivots, *pivots_device, *visited;
int *nbrs_device, *nbr_offs_device;
int *sim_nbrs, *sim_nbrs_device;
pivots = (bool *)malloc(num_vs * sizeof(bool));
visited = (bool *)malloc(num_vs * sizeof(bool));
sim_nbrs = (int *)malloc(num_es * sizeof(int));
size_t size_vs_bool = num_vs * sizeof(bool);
size_t size_vs_int = (num_vs+1) * sizeof(int);
size_t size_es_int = (num_es+1) * sizeof(int);
hipMalloc(&pivots_device, size_vs_bool);
hipMalloc(&nbrs_device, size_es_int);
hipMalloc(&nbr_offs_device, size_vs_int);
hipMalloc(&sim_nbrs_device, size_es_int);
hipMemset(sim_nbrs_device, -1, size_es_int);
hipMemset(pivots_device, false, size_vs_bool);
std::fill(cluster_result, cluster_result + num_vs, -1);
std::fill(visited, visited + num_vs, false);
hipMemcpy(nbr_offs_device, nbr_offs, size_vs_int, hipMemcpyHostToDevice);
hipMemcpy(nbrs_device, nbrs, size_es_int, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( parallel), dim3(num_blocks_per_grid), dim3(num_threads_per_block), 0, 0, nbr_offs_device, nbrs_device,
epsilon, mu, num_vs, pivots_device, sim_nbrs_device);
hipMemcpy(pivots, pivots_device, size_vs_bool, hipMemcpyDeviceToHost);
hipMemcpy(sim_nbrs, sim_nbrs_device, size_es_int, hipMemcpyDeviceToHost);
// Stage 2
for (int i = 0; i < num_vs; i++) {
if (!pivots[i] || visited[i]) continue;
visited[i] = true;
cluster_result[i] = i;
expansion(i, i, nbr_offs, sim_nbrs, visited, pivots, cluster_result);
}
num_clusters = 0;
for (auto i = 0; i< num_vs; i++){
if (cluster_result[i] == i)
num_clusters++;
}
free(pivots);
free(visited);
free(sim_nbrs);
hipFree(pivots_device);
hipFree(nbrs_device);
hipFree(nbr_offs_device);
hipFree(sim_nbrs_device); // release the similarity-neighbour buffer allocated above as well
}
| c28abe69d2dcd5088280a3c3d05c0e44696fcf44.cu | /*
* COMPILE: nvcc -std=c++11 clustering_cuda_skeleton.cu clustering_impl.cpp main.cpp -o cuda
* RUN: ./cuda <path> <epsilon> <mu> <num_blocks_per_grid> <num_threads_per_block>
*/
#include <iostream>
#include "clustering.h"
void expansion(int v, int label, int *nbr_offs, int *sim_nbrs,
bool *visited, bool *pivots, int *cluster_result){
for (int j=nbr_offs[v]; j<nbr_offs[v+1]; j++){
if (sim_nbrs[j] != -1){
int nbr_id = sim_nbrs[j];
if((pivots[nbr_id]) && (!visited[nbr_id])){
visited[nbr_id] = true;
cluster_result[nbr_id] = label;
expansion(nbr_id, label, nbr_offs,
sim_nbrs, visited, pivots, cluster_result);
}
} else{
break;
}
}
}
__global__ void parallel(int *nbr_offs_device, int *nbrs_device, float epsilon, int mu,
int num_vs, bool *pivots_device, int *sim_nbrs_device){
//Stage 1
const int num_thread = blockDim.x*gridDim.x;
for(int i = blockDim.x*blockIdx.x + threadIdx.x; i<num_vs; i+=num_thread){
// i: Vertex id
int left_start = nbr_offs_device[i];
int left_end = nbr_offs_device[i+1];
int left_size = left_end - left_start;
int counter = 0;
for (int j = left_start; j < left_end; j++){
int nbr_id = nbrs_device[j];
int right_start = nbr_offs_device[nbr_id];
int right_end = nbr_offs_device[nbr_id + 1];
int right_size = right_end - right_start;
// compute the similarity
int left_pos = left_start, right_pos = right_start, num_com_nbrs = 0;
while (left_pos < left_end && right_pos < right_end) {
if (nbrs_device[left_pos] == nbrs_device[right_pos]) {
num_com_nbrs++;
left_pos++;
right_pos++;
} else if (nbrs_device[left_pos] < nbrs_device[right_pos]) {
left_pos++;
} else {
right_pos++;
}
}
float sim = (num_com_nbrs + 2) / sqrt((left_size + 1.0) * (right_size + 1.0));
if (sim > epsilon) {
sim_nbrs_device[nbr_offs_device[i] + counter] = nbr_id;
counter++;
}
}
if(counter > mu) pivots_device[i] = true;
}
}
void cuda_scan(int num_vs, int num_es, int *nbr_offs, int *nbrs,
float epsilon, int mu, int num_blocks_per_grid, int num_threads_per_block,
int &num_clusters, int *cluster_result) {
// Stage 1
bool *pivots, *pivots_device, *visited;
int *nbrs_device, *nbr_offs_device;
int *sim_nbrs, *sim_nbrs_device;
pivots = (bool *)malloc(num_vs * sizeof(bool));
visited = (bool *)malloc(num_vs * sizeof(bool));
sim_nbrs = (int *)malloc(num_es * sizeof(int));
size_t size_vs_bool = num_vs * sizeof(bool);
size_t size_vs_int = (num_vs+1) * sizeof(int);
size_t size_es_int = (num_es+1) * sizeof(int);
cudaMalloc(&pivots_device, size_vs_bool);
cudaMalloc(&nbrs_device, size_es_int);
cudaMalloc(&nbr_offs_device, size_vs_int);
cudaMalloc(&sim_nbrs_device, size_es_int);
cudaMemset(sim_nbrs_device, -1, size_es_int);
cudaMemset(pivots_device, false, size_vs_bool);
std::fill(cluster_result, cluster_result + num_vs, -1);
std::fill(visited, visited + num_vs, false);
cudaMemcpy(nbr_offs_device, nbr_offs, size_vs_int, cudaMemcpyHostToDevice);
cudaMemcpy(nbrs_device, nbrs, size_es_int, cudaMemcpyHostToDevice);
parallel<<<num_blocks_per_grid, num_threads_per_block>>>(nbr_offs_device, nbrs_device,
epsilon, mu, num_vs, pivots_device, sim_nbrs_device);
cudaMemcpy(pivots, pivots_device, size_vs_bool, cudaMemcpyDeviceToHost);
cudaMemcpy(sim_nbrs, sim_nbrs_device, size_es_int, cudaMemcpyDeviceToHost);
// Stage 2
for (int i = 0; i < num_vs; i++) {
if (!pivots[i] || visited[i]) continue;
visited[i] = true;
cluster_result[i] = i;
expansion(i, i, nbr_offs, sim_nbrs, visited, pivots, cluster_result);
}
num_clusters = 0;
for (auto i = 0; i< num_vs; i++){
if (cluster_result[i] == i)
num_clusters++;
}
free(pivots);
free(visited);
free(sim_nbrs);
cudaFree(pivots_device);
cudaFree(nbrs_device);
cudaFree(nbr_offs_device);
cudaFree(sim_nbrs_device); // release the similarity-neighbour buffer allocated above as well
}
|
69dbfaa4d612ffb4da5bc402feaef6a46645aefb.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kExtractPatches2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
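// For each matrix size and block shape listed above: allocate dummy inputs,
// warm up with 10 launches, then time 1000 launches of kExtractPatches2 and
// print the elapsed microseconds together with the configuration.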
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *images = NULL;
hipMalloc(&images, XSIZE*YSIZE);
float *patches = NULL;
hipMalloc(&patches, XSIZE*YSIZE);
float *width_offset = NULL;
hipMalloc(&width_offset, XSIZE*YSIZE);
float *height_offset = NULL;
hipMalloc(&height_offset, XSIZE*YSIZE);
float *flip = NULL;
hipMalloc(&flip, XSIZE*YSIZE);
int num_images = 1;
int img_width = XSIZE;
int img_height = YSIZE;
int patch_width = XSIZE;
int patch_height = YSIZE;
int num_colors = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((kExtractPatches2), dim3(gridBlock), dim3(threadBlock), 0, 0, images, patches, width_offset, height_offset, flip, num_images, img_width, img_height, patch_width, patch_height, num_colors);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((kExtractPatches2), dim3(gridBlock), dim3(threadBlock), 0, 0, images, patches, width_offset, height_offset, flip, num_images, img_width, img_height, patch_width, patch_height, num_colors);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((kExtractPatches2), dim3(gridBlock), dim3(threadBlock), 0, 0, images, patches, width_offset, height_offset, flip, num_images, img_width, img_height, patch_width, patch_height, num_colors);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 69dbfaa4d612ffb4da5bc402feaef6a46645aefb.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kExtractPatches2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *images = NULL;
cudaMalloc(&images, XSIZE*YSIZE);
float *patches = NULL;
cudaMalloc(&patches, XSIZE*YSIZE);
float *width_offset = NULL;
cudaMalloc(&width_offset, XSIZE*YSIZE);
float *height_offset = NULL;
cudaMalloc(&height_offset, XSIZE*YSIZE);
float *flip = NULL;
cudaMalloc(&flip, XSIZE*YSIZE);
int num_images = 1;
int img_width = XSIZE;
int img_height = YSIZE;
int patch_width = XSIZE;
int patch_height = YSIZE;
int num_colors = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kExtractPatches2<<<gridBlock,threadBlock>>>(images,patches,width_offset,height_offset,flip,num_images,img_width,img_height,patch_width,patch_height,num_colors);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kExtractPatches2<<<gridBlock,threadBlock>>>(images,patches,width_offset,height_offset,flip,num_images,img_width,img_height,patch_width,patch_height,num_colors);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kExtractPatches2<<<gridBlock,threadBlock>>>(images,patches,width_offset,height_offset,flip,num_images,img_width,img_height,patch_width,patch_height,num_colors);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
1b697b110ffbf7ea18234381beec7b1057992683.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zlascl.cu normal z -> d, Wed Sep 17 15:08:23 2014
@author Mark Gates
*/
#include "common_magma.h"
#define NB 64
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right.
__global__ void
dlascl_full(int m, int n, double mul, double* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
A += ind;
if (ind < m) {
for(int j=0; j < n; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
__global__ void
dlascl_lower(int m, int n, double mul, double* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
int break_d = (ind < n) ? ind : n-1;
A += ind;
if (ind < m) {
for(int j=0; j <= break_d; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
__global__ void
dlascl_upper(int m, int n, double mul, double* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
A += ind;
if (ind < m) {
for(int j=n-1; j >= ind; j--)
A[j*lda] *= mul;
}
}
/**
Purpose
-------
DLASCL multiplies the M by N real matrix A by the real scalar
CTO/CFROM. This is done without over/underflow as long as the final
result CTO*A(I,J)/CFROM does not over/underflow. TYPE specifies that
A may be full, upper triangular, lower triangular.
Arguments
---------
\param[in]
type magma_type_t
TYPE indicates the storage type of the input matrix A.
= MagmaFull: full matrix.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
\param[in]
kl INTEGER
Unused, for LAPACK compatibility.
\param[in]
ku KU is INTEGER
Unused, for LAPACK compatibility.
\param[in]
cfrom DOUBLE PRECISION
\param[in]
cto DOUBLE PRECISION
\n
The matrix A is multiplied by CTO/CFROM. A(I,J) is computed
without over/underflow if the final result CTO*A(I,J)/CFROM
can be represented without over/underflow.
CFROM must be nonzero. CFROM and CTO must not be NAN.
\param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
\param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
\param[in,out]
dA DOUBLE PRECISION array, dimension (LDDA,N)
The matrix to be multiplied by CTO/CFROM. See TYPE for the
storage type.
\param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
\param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_daux2
********************************************************************/
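// Worked example (values hypothetical): with type = MagmaFull, cfrom = 2 and
// cto = 6, every entry of dA is ultimately multiplied by 3; the loop below may
// split the factor across several passes to avoid over/underflow.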
extern "C" void
magmablas_dlascl_q(
magma_type_t type, magma_int_t kl, magma_int_t ku,
double cfrom, double cto,
magma_int_t m, magma_int_t n,
double *dA, magma_int_t ldda, magma_int_t *info,
magma_queue_t queue )
{
*info = 0;
if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull )
*info = -1;
else if ( cfrom == 0 || isnan(cfrom) )
*info = -4;
else if ( isnan(cto) )
*info = -5;
else if ( m < 0 )
*info = -6;
else if ( n < 0 )
*info = -3;
else if ( ldda < max(1,m) )
*info = -7;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //info;
}
dim3 grid( (m + NB - 1)/NB );
dim3 threads( NB );
double smlnum, bignum, cfromc, ctoc, cto1, cfrom1, mul;
magma_int_t done = false;
// Uses over/underflow procedure from LAPACK dlascl
// Get machine parameters
smlnum = lapackf77_dlamch("s");
bignum = 1 / smlnum;
cfromc = cfrom;
ctoc = cto;
int cnt = 0;
while( ! done ) {
cfrom1 = cfromc*smlnum;
if( cfrom1 == cfromc ) {
// cfromc is an inf. Multiply by a correctly signed zero for
// finite ctoc, or a nan if ctoc is infinite.
mul = ctoc / cfromc;
done = true;
cto1 = ctoc;
}
else {
cto1 = ctoc / bignum;
if( cto1 == ctoc ) {
// ctoc is either 0 or an inf. In both cases, ctoc itself
// serves as the correct multiplication factor.
mul = ctoc;
done = true;
cfromc = 1;
}
else if( fabs(cfrom1) > fabs(ctoc) && ctoc != 0 ) {
mul = smlnum;
done = false;
cfromc = cfrom1;
}
else if( fabs(cto1) > fabs(cfromc) ) {
mul = bignum;
done = false;
ctoc = cto1;
}
else {
mul = ctoc / cfromc;
done = true;
}
}
if (type == MagmaLower) {
hipLaunchKernelGGL(( dlascl_lower) , dim3(grid), dim3(threads), 0, queue , m, n, mul, dA, ldda);
}
else if (type == MagmaUpper) {
hipLaunchKernelGGL(( dlascl_upper) , dim3(grid), dim3(threads), 0, queue , m, n, mul, dA, ldda);
}
else if (type == MagmaFull) {
hipLaunchKernelGGL(( dlascl_full) , dim3(grid), dim3(threads), 0, queue , m, n, mul, dA, ldda);
}
cnt += 1;
}
}
/**
@see magmablas_dlascl_q
@ingroup magma_daux2
********************************************************************/
extern "C" void
magmablas_dlascl(
magma_type_t type, magma_int_t kl, magma_int_t ku,
double cfrom, double cto,
magma_int_t m, magma_int_t n,
double *dA, magma_int_t ldda, magma_int_t *info )
{
magmablas_dlascl_q( type, kl, ku, cfrom, cto, m, n, dA, ldda, info, magma_stream );
}
| 1b697b110ffbf7ea18234381beec7b1057992683.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zlascl.cu normal z -> d, Wed Sep 17 15:08:23 2014
@author Mark Gates
*/
#include "common_magma.h"
#define NB 64
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right.
__global__ void
dlascl_full(int m, int n, double mul, double* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
A += ind;
if (ind < m) {
for(int j=0; j < n; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
__global__ void
dlascl_lower(int m, int n, double mul, double* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
int break_d = (ind < n) ? ind : n-1;
A += ind;
if (ind < m) {
for(int j=0; j <= break_d; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
__global__ void
dlascl_upper(int m, int n, double mul, double* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
A += ind;
if (ind < m) {
for(int j=n-1; j >= ind; j--)
A[j*lda] *= mul;
}
}
/**
Purpose
-------
DLASCL multiplies the M by N real matrix A by the real scalar
CTO/CFROM. This is done without over/underflow as long as the final
result CTO*A(I,J)/CFROM does not over/underflow. TYPE specifies that
A may be full, upper triangular, lower triangular.
Arguments
---------
\param[in]
type magma_type_t
TYPE indicates the storage type of the input matrix A.
= MagmaFull: full matrix.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
\param[in]
kl INTEGER
Unused, for LAPACK compatibility.
\param[in]
ku KU is INTEGER
Unused, for LAPACK compatibility.
\param[in]
cfrom DOUBLE PRECISION
\param[in]
cto DOUBLE PRECISION
\n
The matrix A is multiplied by CTO/CFROM. A(I,J) is computed
without over/underflow if the final result CTO*A(I,J)/CFROM
can be represented without over/underflow.
CFROM must be nonzero. CFROM and CTO must not be NAN.
\param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
\param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
\param[in,out]
dA DOUBLE PRECISION array, dimension (LDDA,N)
The matrix to be multiplied by CTO/CFROM. See TYPE for the
storage type.
\param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
\param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_daux2
********************************************************************/
extern "C" void
magmablas_dlascl_q(
magma_type_t type, magma_int_t kl, magma_int_t ku,
double cfrom, double cto,
magma_int_t m, magma_int_t n,
double *dA, magma_int_t ldda, magma_int_t *info,
magma_queue_t queue )
{
*info = 0;
if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull )
*info = -1;
else if ( cfrom == 0 || isnan(cfrom) )
*info = -4;
else if ( isnan(cto) )
*info = -5;
else if ( m < 0 )
*info = -6;
else if ( n < 0 )
*info = -3;
else if ( ldda < max(1,m) )
*info = -7;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //info;
}
dim3 grid( (m + NB - 1)/NB );
dim3 threads( NB );
double smlnum, bignum, cfromc, ctoc, cto1, cfrom1, mul;
magma_int_t done = false;
// Uses over/underflow procedure from LAPACK dlascl
// Get machine parameters
smlnum = lapackf77_dlamch("s");
bignum = 1 / smlnum;
cfromc = cfrom;
ctoc = cto;
int cnt = 0;
while( ! done ) {
cfrom1 = cfromc*smlnum;
if( cfrom1 == cfromc ) {
// cfromc is an inf. Multiply by a correctly signed zero for
// finite ctoc, or a nan if ctoc is infinite.
mul = ctoc / cfromc;
done = true;
cto1 = ctoc;
}
else {
cto1 = ctoc / bignum;
if( cto1 == ctoc ) {
// ctoc is either 0 or an inf. In both cases, ctoc itself
// serves as the correct multiplication factor.
mul = ctoc;
done = true;
cfromc = 1;
}
else if( fabs(cfrom1) > fabs(ctoc) && ctoc != 0 ) {
mul = smlnum;
done = false;
cfromc = cfrom1;
}
else if( fabs(cto1) > fabs(cfromc) ) {
mul = bignum;
done = false;
ctoc = cto1;
}
else {
mul = ctoc / cfromc;
done = true;
}
}
if (type == MagmaLower) {
dlascl_lower <<< grid, threads, 0, queue >>> (m, n, mul, dA, ldda);
}
else if (type == MagmaUpper) {
dlascl_upper <<< grid, threads, 0, queue >>> (m, n, mul, dA, ldda);
}
else if (type == MagmaFull) {
dlascl_full <<< grid, threads, 0, queue >>> (m, n, mul, dA, ldda);
}
cnt += 1;
}
}
/**
@see magmablas_dlascl_q
@ingroup magma_daux2
********************************************************************/
extern "C" void
magmablas_dlascl(
magma_type_t type, magma_int_t kl, magma_int_t ku,
double cfrom, double cto,
magma_int_t m, magma_int_t n,
double *dA, magma_int_t ldda, magma_int_t *info )
{
magmablas_dlascl_q( type, kl, ku, cfrom, cto, m, n, dA, ldda, info, magma_stream );
}
|
11751b3320deee17f85938ad2602b5b21b68019f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <vector>
#include "lite/backends/cuda/cuda_utils.h"
#include "lite/core/op_registry.h"
#include "lite/core/target_wrapper.h"
#include "lite/kernels/cuda/sequence_pool_concat_compute.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace cuda {
template <typename Dtype>
__global__ void sequence_pool_concat(const uint64_t* input_locate_data,
const int* pool_type_list,
Dtype* output_data,
const int* offset,
int batch,
int in_num,
int in_dim) {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int em_id = tid % in_dim;
int in_id = (tid / in_dim) % in_num;
int seq_id = tid / (in_dim * in_num);
if (seq_id >= batch) {
return;
}
Dtype* out_data = output_data + tid;
int offset_id = in_id * (batch + 1) + seq_id;
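// Only LAST (code 4) and MAX (code 6) pooling are implemented here; any other
// pool type falls through to the final early return.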
if (pool_type_list[in_id] == 4) { // last
const Dtype* in_data =
reinterpret_cast<const Dtype*>(
reinterpret_cast<uintptr_t>(input_locate_data[in_id])) +
em_id;
output_data[tid] = in_data[(offset[offset_id + 1] - 1) * in_dim];
} else if (pool_type_list[in_id] == 6) { // max
const Dtype* in_data =
reinterpret_cast<const Dtype*>(
reinterpret_cast<uintptr_t>(input_locate_data[in_id])) +
em_id + offset[offset_id] * in_dim;
Dtype max = in_data[0];
for (int i = 1; i < offset[offset_id + 1] - offset[offset_id]; i++) {
Dtype cur_data = in_data[i * in_dim];
max = cur_data > max ? cur_data : max;
}
output_data[tid] = max;
} else {
return;
}
}
template <typename Dtype>
__global__ void sequence_pool_concat(const uint64_t* input_locate_data,
const int* pool_type_list,
Dtype* output_data,
const int* offset,
int batch,
int in_num,
const int* out_offset,
const int* out_id_seq_map_data,
int out_dim) {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int em_id = tid % out_dim;
int seq_id = tid / out_dim;
int in_id = out_id_seq_map_data[em_id];
em_id = em_id - out_offset[in_id];
int in_dim = out_offset[in_id + 1] - out_offset[in_id];
if (seq_id >= batch) {
return;
}
Dtype* out_data = output_data + tid;
int offset_id = in_id * (batch + 1) + seq_id;
if (pool_type_list[in_id] == 4) { // last
const Dtype* in_data =
reinterpret_cast<const Dtype*>(
reinterpret_cast<uintptr_t>(input_locate_data[in_id])) +
em_id;
output_data[tid] = in_data[(offset[offset_id + 1] - 1) * in_dim];
} else if (pool_type_list[in_id] == 6) { // max
const Dtype* in_data =
reinterpret_cast<const Dtype*>(
reinterpret_cast<uintptr_t>(input_locate_data[in_id])) +
em_id + offset[offset_id] * in_dim;
Dtype max = in_data[0];
for (int i = 1; i < offset[offset_id + 1] - offset[offset_id]; i++) {
Dtype cur_data = in_data[i * in_dim];
max = cur_data > max ? cur_data : max;
}
output_data[tid] = max;
} else {
return;
}
}
void SequencePoolConcatCompute::PrepareForRun() {
auto& param = this->Param<param_t>();
auto& ctx = this->ctx_->template As<CUDAContext>();
auto stream = ctx.exec_stream();
int in_num = param.X.size();
std::vector<int64_t> shape({in_num, 1, 1, 1});
_in_offset_tensor.Resize(shape);
_in_ptr_tensor.Resize(shape);
_in_pool_type_tensor.Resize(shape);
int* in_pool_type_data =
_in_pool_type_tensor.mutable_data<int>(TARGET(kCUDA));
std::vector<int> pool_type_list;
for (auto type : param.pool_type) {
if (type == "AVERAGE") {
pool_type_list.push_back(1);
} else if (type == "SUM") {
pool_type_list.push_back(2);
} else if (type == "SQRT") {
pool_type_list.push_back(3);
} else if (type == "LAST") {
pool_type_list.push_back(4);
} else if (type == "FIRST") {
pool_type_list.push_back(5);
} else if (type == "MAX") {
pool_type_list.push_back(6);
} else {
LOG(ERROR) << "pool type " << type << " is not supoorted.";
}
}
_is_in_same_len = true;
int in_len = param.X[0]->dims().count(1, param.X[0]->dims().size());
std::vector<int> out_id_seq_map_list;
std::vector<int> out_offset_list;
int total_len = 0;
out_offset_list.push_back(total_len);
for (int i = 0; i < in_num; ++i) {
int cur_len = param.X[i]->dims().count(1, param.X[i]->dims().size());
_is_in_same_len = _is_in_same_len && in_len == cur_len;
for (int k = 0; k < cur_len; ++k) {
out_id_seq_map_list.push_back(i);
}
total_len += cur_len;
out_offset_list.push_back(total_len);
}
std::vector<int64_t> out_id_seq_map_shape({total_len, 1, 1, 1});
std::vector<int64_t> out_offset_shape({in_num + 1, 1, 1, 1});
_out_offset_tensor.Resize(out_offset_shape);
_out_id_seq_map_tensor.Resize(out_id_seq_map_shape);
int* out_offset_data = _out_offset_tensor.mutable_data<int>(TARGET(kCUDA));
int* out_id_seq_map_data =
_out_id_seq_map_tensor.mutable_data<int>(TARGET(kCUDA));
TargetWrapperCuda::MemcpyAsync(in_pool_type_data,
&pool_type_list[0],
sizeof(int) * param.X.size(),
IoDirection::HtoD,
stream);
TargetWrapperCuda::MemcpyAsync(out_offset_data,
&out_offset_list[0],
sizeof(int) * out_offset_list.size(),
IoDirection::HtoD,
stream);
TargetWrapperCuda::MemcpyAsync(out_id_seq_map_data,
&out_id_seq_map_list[0],
sizeof(int) * out_id_seq_map_list.size(),
IoDirection::HtoD,
stream);
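// Synchronize before returning: the asynchronous copies above read from host
// vectors that live on this function's stack.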
hipStreamSynchronize(stream);
}
void SequencePoolConcatCompute::Run() {
auto& param = this->Param<param_t>();
auto& ctx = this->ctx_->template As<CUDAContext>();
auto stream = ctx.exec_stream();
auto& inputs = param.X;
auto offset = inputs[0]->lod()[0];
int batch = offset.size() - 1;
CHECK_GE(offset.size(), 1);
std::vector<int> all_offset;
for (int i = 0; i < inputs.size(); ++i) {
auto it = all_offset.end();
auto cur_offset = inputs[i]->lod()[0];
all_offset.insert(it, cur_offset.begin(), cur_offset.end());
}
int total_size = all_offset.size();
std::vector<int64_t> offset_shape({total_size, 1, 1, 1});
_in_offset_tensor.Resize(offset_shape);
int* offset_data = _in_offset_tensor.mutable_data<int>(TARGET(kCUDA));
TargetWrapperCuda::MemcpyAsync(offset_data,
&all_offset[0],
sizeof(int) * all_offset.size(),
IoDirection::HtoD,
stream);
std::vector<uint64_t> in_locate_vec;
for (int i = 0; i < inputs.size(); ++i) {
in_locate_vec.push_back(
reinterpret_cast<uintptr_t>(inputs[i]->data<float>()));
}
uint64_t* in_locate_data =
_in_ptr_tensor.mutable_data<uint64_t>(TARGET(kCUDA));
TargetWrapperCuda::MemcpyAsync(in_locate_data,
&in_locate_vec[0],
sizeof(uint64_t) * inputs.size(),
IoDirection::HtoD,
stream);
const int* in_pool_type_data = _in_pool_type_tensor.data<int>();
const int* out_id_seq_map_data = _out_id_seq_map_tensor.data<int>();
const int* out_offset_data = _out_offset_tensor.data<int>();
int count = param.Out->numel();
int in_dim = inputs[0]->numel() / inputs[0]->dims()[0];
float* out_data = param.Out->mutable_data<float>(TARGET(kCUDA));
int in_num = inputs.size();
if (_is_in_same_len) {
hipLaunchKernelGGL(( sequence_pool_concat<
float>), dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, stream,
in_locate_data,
in_pool_type_data,
out_data,
offset_data,
batch,
in_num,
in_dim);
} else {
int out_dim = param.Out->numel() / param.Out->dims()[0];
hipLaunchKernelGGL(( sequence_pool_concat<
float>), dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, stream,
in_locate_data,
in_pool_type_data,
out_data,
offset_data,
batch,
in_num,
out_offset_data,
out_id_seq_map_data,
out_dim);
}
}
} // namespace cuda
} // namespace kernels
} // namespace lite
} // namespace paddle
REGISTER_LITE_KERNEL(sequence_pool_concat,
kCUDA,
kFloat,
kNCHW,
paddle::lite::kernels::cuda::SequencePoolConcatCompute,
def)
.BindInput("X", {LiteType::GetTensorTy(TARGET(kCUDA))})
.BindOutput("Out", {LiteType::GetTensorTy(TARGET(kCUDA))})
.Finalize();
| 11751b3320deee17f85938ad2602b5b21b68019f.cu | // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <vector>
#include "lite/backends/cuda/cuda_utils.h"
#include "lite/core/op_registry.h"
#include "lite/core/target_wrapper.h"
#include "lite/kernels/cuda/sequence_pool_concat_compute.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace cuda {
template <typename Dtype>
__global__ void sequence_pool_concat(const uint64_t* input_locate_data,
const int* pool_type_list,
Dtype* output_data,
const int* offset,
int batch,
int in_num,
int in_dim) {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int em_id = tid % in_dim;
int in_id = (tid / in_dim) % in_num;
int seq_id = tid / (in_dim * in_num);
if (seq_id >= batch) {
return;
}
Dtype* out_data = output_data + tid;
int offset_id = in_id * (batch + 1) + seq_id;
if (pool_type_list[in_id] == 4) { // last
const Dtype* in_data =
reinterpret_cast<const Dtype*>(
reinterpret_cast<uintptr_t>(input_locate_data[in_id])) +
em_id;
output_data[tid] = in_data[(offset[offset_id + 1] - 1) * in_dim];
} else if (pool_type_list[in_id] == 6) { // max
const Dtype* in_data =
reinterpret_cast<const Dtype*>(
reinterpret_cast<uintptr_t>(input_locate_data[in_id])) +
em_id + offset[offset_id] * in_dim;
Dtype max = in_data[0];
for (int i = 1; i < offset[offset_id + 1] - offset[offset_id]; i++) {
Dtype cur_data = in_data[i * in_dim];
max = cur_data > max ? cur_data : max;
}
output_data[tid] = max;
} else {
return;
}
}
template <typename Dtype>
__global__ void sequence_pool_concat(const uint64_t* input_locate_data,
const int* pool_type_list,
Dtype* output_data,
const int* offset,
int batch,
int in_num,
const int* out_offset,
const int* out_id_seq_map_data,
int out_dim) {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int em_id = tid % out_dim;
int seq_id = tid / out_dim;
int in_id = out_id_seq_map_data[em_id];
em_id = em_id - out_offset[in_id];
int in_dim = out_offset[in_id + 1] - out_offset[in_id];
if (seq_id >= batch) {
return;
}
Dtype* out_data = output_data + tid;
int offset_id = in_id * (batch + 1) + seq_id;
if (pool_type_list[in_id] == 4) { // last
const Dtype* in_data =
reinterpret_cast<const Dtype*>(
reinterpret_cast<uintptr_t>(input_locate_data[in_id])) +
em_id;
output_data[tid] = in_data[(offset[offset_id + 1] - 1) * in_dim];
} else if (pool_type_list[in_id] == 6) { // max
const Dtype* in_data =
reinterpret_cast<const Dtype*>(
reinterpret_cast<uintptr_t>(input_locate_data[in_id])) +
em_id + offset[offset_id] * in_dim;
Dtype max = in_data[0];
for (int i = 1; i < offset[offset_id + 1] - offset[offset_id]; i++) {
Dtype cur_data = in_data[i * in_dim];
max = cur_data > max ? cur_data : max;
}
output_data[tid] = max;
} else {
return;
}
}
void SequencePoolConcatCompute::PrepareForRun() {
auto& param = this->Param<param_t>();
auto& ctx = this->ctx_->template As<CUDAContext>();
auto stream = ctx.exec_stream();
int in_num = param.X.size();
std::vector<int64_t> shape({in_num, 1, 1, 1});
_in_offset_tensor.Resize(shape);
_in_ptr_tensor.Resize(shape);
_in_pool_type_tensor.Resize(shape);
int* in_pool_type_data =
_in_pool_type_tensor.mutable_data<int>(TARGET(kCUDA));
std::vector<int> pool_type_list;
for (auto type : param.pool_type) {
if (type == "AVERAGE") {
pool_type_list.push_back(1);
} else if (type == "SUM") {
pool_type_list.push_back(2);
} else if (type == "SQRT") {
pool_type_list.push_back(3);
} else if (type == "LAST") {
pool_type_list.push_back(4);
} else if (type == "FIRST") {
pool_type_list.push_back(5);
} else if (type == "MAX") {
pool_type_list.push_back(6);
} else {
LOG(ERROR) << "pool type " << type << " is not supoorted.";
}
}
_is_in_same_len = true;
int in_len = param.X[0]->dims().count(1, param.X[0]->dims().size());
std::vector<int> out_id_seq_map_list;
std::vector<int> out_offset_list;
int total_len = 0;
out_offset_list.push_back(total_len);
for (int i = 0; i < in_num; ++i) {
int cur_len = param.X[i]->dims().count(1, param.X[i]->dims().size());
_is_in_same_len = _is_in_same_len && in_len == cur_len;
for (int k = 0; k < cur_len; ++k) {
out_id_seq_map_list.push_back(i);
}
total_len += cur_len;
out_offset_list.push_back(total_len);
}
std::vector<int64_t> out_id_seq_map_shape({total_len, 1, 1, 1});
std::vector<int64_t> out_offset_shape({in_num + 1, 1, 1, 1});
_out_offset_tensor.Resize(out_offset_shape);
_out_id_seq_map_tensor.Resize(out_id_seq_map_shape);
int* out_offset_data = _out_offset_tensor.mutable_data<int>(TARGET(kCUDA));
int* out_id_seq_map_data =
_out_id_seq_map_tensor.mutable_data<int>(TARGET(kCUDA));
TargetWrapperCuda::MemcpyAsync(in_pool_type_data,
&pool_type_list[0],
sizeof(int) * param.X.size(),
IoDirection::HtoD,
stream);
TargetWrapperCuda::MemcpyAsync(out_offset_data,
&out_offset_list[0],
sizeof(int) * out_offset_list.size(),
IoDirection::HtoD,
stream);
TargetWrapperCuda::MemcpyAsync(out_id_seq_map_data,
&out_id_seq_map_list[0],
sizeof(int) * out_id_seq_map_list.size(),
IoDirection::HtoD,
stream);
cudaStreamSynchronize(stream);
}
void SequencePoolConcatCompute::Run() {
auto& param = this->Param<param_t>();
auto& ctx = this->ctx_->template As<CUDAContext>();
auto stream = ctx.exec_stream();
auto& inputs = param.X;
auto offset = inputs[0]->lod()[0];
int batch = offset.size() - 1;
CHECK_GE(offset.size(), 1);
std::vector<int> all_offset;
for (int i = 0; i < inputs.size(); ++i) {
auto it = all_offset.end();
auto cur_offset = inputs[i]->lod()[0];
all_offset.insert(it, cur_offset.begin(), cur_offset.end());
}
int total_size = all_offset.size();
std::vector<int64_t> offset_shape({total_size, 1, 1, 1});
_in_offset_tensor.Resize(offset_shape);
int* offset_data = _in_offset_tensor.mutable_data<int>(TARGET(kCUDA));
TargetWrapperCuda::MemcpyAsync(offset_data,
&all_offset[0],
sizeof(int) * all_offset.size(),
IoDirection::HtoD,
stream);
std::vector<uint64_t> in_locate_vec;
for (int i = 0; i < inputs.size(); ++i) {
in_locate_vec.push_back(
reinterpret_cast<uintptr_t>(inputs[i]->data<float>()));
}
uint64_t* in_locate_data =
_in_ptr_tensor.mutable_data<uint64_t>(TARGET(kCUDA));
TargetWrapperCuda::MemcpyAsync(in_locate_data,
&in_locate_vec[0],
sizeof(uint64_t) * inputs.size(),
IoDirection::HtoD,
stream);
const int* in_pool_type_data = _in_pool_type_tensor.data<int>();
const int* out_id_seq_map_data = _out_id_seq_map_tensor.data<int>();
const int* out_offset_data = _out_offset_tensor.data<int>();
int count = param.Out->numel();
int in_dim = inputs[0]->numel() / inputs[0]->dims()[0];
float* out_data = param.Out->mutable_data<float>(TARGET(kCUDA));
int in_num = inputs.size();
if (_is_in_same_len) {
sequence_pool_concat<
float><<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, stream>>>(
in_locate_data,
in_pool_type_data,
out_data,
offset_data,
batch,
in_num,
in_dim);
} else {
int out_dim = param.Out->numel() / param.Out->dims()[0];
sequence_pool_concat<
float><<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, stream>>>(
in_locate_data,
in_pool_type_data,
out_data,
offset_data,
batch,
in_num,
out_offset_data,
out_id_seq_map_data,
out_dim);
}
}
} // namespace cuda
} // namespace kernels
} // namespace lite
} // namespace paddle
REGISTER_LITE_KERNEL(sequence_pool_concat,
kCUDA,
kFloat,
kNCHW,
paddle::lite::kernels::cuda::SequencePoolConcatCompute,
def)
.BindInput("X", {LiteType::GetTensorTy(TARGET(kCUDA))})
.BindOutput("Out", {LiteType::GetTensorTy(TARGET(kCUDA))})
.Finalize();
|
f9f333d84d9cd4ebf17f520730dbf68141f551b8.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <limits>
namespace at::native {
CONSTEXPR_EXCEPT_WIN_CUDA char acosh_name[] = "acosh";
void acosh_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if(at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR
static const auto acosh_string = jiterator_stringify(
template <typename T>
T acosh(T a) {
return std::acosh(a);
}
);
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "acosh_name", [&]() {
jitted_gpu_kernel<
/*name=*/ acosh_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, acosh_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "acosh_name", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::acosh(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half, ScalarType::BFloat16,
common_dtype, "acosh_cuda",
[&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::acosh(a);
});
});
}
}
REGISTER_DISPATCH(acosh_stub, &acosh_kernel_cuda);
} // namespace at::native
| f9f333d84d9cd4ebf17f520730dbf68141f551b8.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <limits>
namespace at::native {
CONSTEXPR_EXCEPT_WIN_CUDA char acosh_name[] = "acosh";
void acosh_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if(at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR
static const auto acosh_string = jiterator_stringify(
template <typename T>
T acosh(T a) {
return std::acosh(a);
}
);
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "acosh_name", [&]() {
jitted_gpu_kernel<
/*name=*/ acosh_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, acosh_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "acosh_name", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::acosh(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half, ScalarType::BFloat16,
common_dtype, "acosh_cuda",
[&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::acosh(a);
});
});
}
}
REGISTER_DISPATCH(acosh_stub, &acosh_kernel_cuda);
} // namespace at::native
|
34024134e760887fac6f245e8bb3c946d1e340fb.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2020 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
/*
* This example benchmarks copying two data arrays to and from the GPU.
 * It uses unified memory and chrono, cuda events, and NVTX for timing.
*/
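// A note on the three timing paths below: run_chrono() measures host
// wall-clock time with std::chrono around the synchronized loop,
// run_events() brackets the same work with hipEventRecord and reads it back
// via hipEventElapsedTime, and run_nvtx() only pushes/pops NVTX ranges so an
// external profiler can label the work (it reports no numbers itself).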
#include "cuda_helper.h"
template<typename T>
void MemManaged<T>::run_chrono( ) {
for ( int i = 0; i < this->loops; i++ ) {
this->reset( N, h_a_unified.get( ), h_b_unified.get( ) );
this->start = std::chrono::high_resolution_clock::now( );
CUDA_RT_CALL( cudaLaunchKernel( reinterpret_cast<void *>( &VectorOperation<Add<T>, T> ),
this->blocks_per_grid,
this->threads_per_block,
this->a_args ) );
CUDA_RT_CALL( cudaLaunchKernel( reinterpret_cast<void *>( &VectorOperation<Sub<T>, T> ),
this->blocks_per_grid,
this->threads_per_block,
this->b_args ) );
CUDA_RT_CALL( hipDeviceSynchronize( ) );
this->verify( N, h_a_unified.get( ), h_b_unified.get( ) );
this->stop = std::chrono::high_resolution_clock::now( );
this->elapsed_chrono_ms += this->stop - this->start;
}
this->get_chrono_results( size );
}
template<typename T>
void MemManaged<T>::run_events( ) {
for ( int i = 0; i < this->loops; i++ ) {
this->reset( N, h_a_unified.get( ), h_b_unified.get( ) );
CUDA_RT_CALL( hipEventRecord( this->start_event ) );
CUDA_RT_CALL( cudaLaunchKernel( reinterpret_cast<void *>( &VectorOperation<Add<T>, T> ),
this->blocks_per_grid,
this->threads_per_block,
this->a_args ) );
CUDA_RT_CALL( cudaLaunchKernel( reinterpret_cast<void *>( &VectorOperation<Sub<T>, T> ),
this->blocks_per_grid,
this->threads_per_block,
this->b_args ) );
CUDA_RT_CALL( hipDeviceSynchronize( ) );
this->verify( N, h_a_unified.get( ), h_b_unified.get( ) );
CUDA_RT_CALL( hipEventRecord( this->stop_event ) );
CUDA_RT_CALL( hipEventSynchronize( this->stop_event ) );
CUDA_RT_CALL( hipEventElapsedTime( &this->elapsed_events_ms, this->start_event, this->stop_event ) );
this->average_events_ms += this->elapsed_events_ms;
}
this->get_events_results( size );
}
template<typename T>
void MemManaged<T>::run_nvtx( ) {
for ( int i = 0; i < this->loops; i++ ) {
PUSH_RANGE( "Reset", 0 )
this->reset( N, h_a_unified.get( ), h_b_unified.get( ) );
POP_RANGE( )
PUSH_RANGE( "Process_Loop", 1 )
PUSH_RANGE( "Kernel_A", 2 )
CUDA_RT_CALL( cudaLaunchKernel( reinterpret_cast<void *>( &VectorOperation<Add<T>, T> ),
this->blocks_per_grid,
this->threads_per_block,
this->a_args ) );
POP_RANGE( )
PUSH_RANGE( "Kernel_B", 3 )
CUDA_RT_CALL( cudaLaunchKernel( reinterpret_cast<void *>( &VectorOperation<Sub<T>, T> ),
this->blocks_per_grid,
this->threads_per_block,
this->b_args ) );
POP_RANGE( )
CUDA_RT_CALL( hipDeviceSynchronize( ) );
PUSH_RANGE( "Verify", 4 )
this->verify( N, h_a_unified.get( ), h_b_unified.get( ) );
POP_RANGE( )
POP_RANGE( )
}
}
/* Main */
int main( int argc, char **argv ) {
using dtype = float;
int N = MemCpy<dtype>::init_size;
if ( argc > 1 ) {
N = std::atoi( argv[1] );
}
MemManaged<dtype> MemManaged( N );
double gigabytes { MemManaged.size * 1e-9 };
printf( "Running with = %lu B (%0.2f GB)\n\n", MemManaged.size, gigabytes );
// Chrono
MemManaged.run_chrono( );
// Events
MemManaged.run_events( );
// NVTX
MemManaged.run_nvtx( );
return ( EXIT_SUCCESS );
} | 34024134e760887fac6f245e8bb3c946d1e340fb.cu | /*
* Copyright 1993-2020 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
/*
* This example benchmarks copying two data arrays to and from the GPU.
 * It uses unified memory and chrono, cuda events, and NVTX for timing.
*/
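// A note on the three timing paths below: run_chrono() measures host
// wall-clock time with std::chrono around the synchronized loop,
// run_events() brackets the same work with cudaEventRecord and reads it back
// via cudaEventElapsedTime, and run_nvtx() only pushes/pops NVTX ranges so an
// external profiler can label the work (it reports no numbers itself).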
#include "cuda_helper.h"
template<typename T>
void MemManaged<T>::run_chrono( ) {
for ( int i = 0; i < this->loops; i++ ) {
this->reset( N, h_a_unified.get( ), h_b_unified.get( ) );
this->start = std::chrono::high_resolution_clock::now( );
CUDA_RT_CALL( cudaLaunchKernel( reinterpret_cast<void *>( &VectorOperation<Add<T>, T> ),
this->blocks_per_grid,
this->threads_per_block,
this->a_args ) );
CUDA_RT_CALL( cudaLaunchKernel( reinterpret_cast<void *>( &VectorOperation<Sub<T>, T> ),
this->blocks_per_grid,
this->threads_per_block,
this->b_args ) );
CUDA_RT_CALL( cudaDeviceSynchronize( ) );
this->verify( N, h_a_unified.get( ), h_b_unified.get( ) );
this->stop = std::chrono::high_resolution_clock::now( );
this->elapsed_chrono_ms += this->stop - this->start;
}
this->get_chrono_results( size );
}
template<typename T>
void MemManaged<T>::run_events( ) {
for ( int i = 0; i < this->loops; i++ ) {
this->reset( N, h_a_unified.get( ), h_b_unified.get( ) );
CUDA_RT_CALL( cudaEventRecord( this->start_event ) );
CUDA_RT_CALL( cudaLaunchKernel( reinterpret_cast<void *>( &VectorOperation<Add<T>, T> ),
this->blocks_per_grid,
this->threads_per_block,
this->a_args ) );
CUDA_RT_CALL( cudaLaunchKernel( reinterpret_cast<void *>( &VectorOperation<Sub<T>, T> ),
this->blocks_per_grid,
this->threads_per_block,
this->b_args ) );
CUDA_RT_CALL( cudaDeviceSynchronize( ) );
this->verify( N, h_a_unified.get( ), h_b_unified.get( ) );
CUDA_RT_CALL( cudaEventRecord( this->stop_event ) );
CUDA_RT_CALL( cudaEventSynchronize( this->stop_event ) );
CUDA_RT_CALL( cudaEventElapsedTime( &this->elapsed_events_ms, this->start_event, this->stop_event ) );
this->average_events_ms += this->elapsed_events_ms;
}
this->get_events_results( size );
}
template<typename T>
void MemManaged<T>::run_nvtx( ) {
for ( int i = 0; i < this->loops; i++ ) {
PUSH_RANGE( "Reset", 0 )
this->reset( N, h_a_unified.get( ), h_b_unified.get( ) );
POP_RANGE( )
PUSH_RANGE( "Process_Loop", 1 )
PUSH_RANGE( "Kernel_A", 2 )
CUDA_RT_CALL( cudaLaunchKernel( reinterpret_cast<void *>( &VectorOperation<Add<T>, T> ),
this->blocks_per_grid,
this->threads_per_block,
this->a_args ) );
POP_RANGE( )
PUSH_RANGE( "Kernel_B", 3 )
CUDA_RT_CALL( cudaLaunchKernel( reinterpret_cast<void *>( &VectorOperation<Sub<T>, T> ),
this->blocks_per_grid,
this->threads_per_block,
this->b_args ) );
POP_RANGE( )
CUDA_RT_CALL( cudaDeviceSynchronize( ) );
PUSH_RANGE( "Verify", 4 )
this->verify( N, h_a_unified.get( ), h_b_unified.get( ) );
POP_RANGE( )
POP_RANGE( )
}
}
/* Main */
int main( int argc, char **argv ) {
using dtype = float;
int N = MemCpy<dtype>::init_size;
if ( argc > 1 ) {
N = std::atoi( argv[1] );
}
MemManaged<dtype> MemManaged( N );
double gigabytes { MemManaged.size * 1e-9 };
printf( "Running with = %lu B (%0.2f GB)\n\n", MemManaged.size, gigabytes );
// Chrono
MemManaged.run_chrono( );
// Events
MemManaged.run_events( );
// NVTX
MemManaged.run_nvtx( );
return ( EXIT_SUCCESS );
} |
f9637a0d6c5ae706ec4758d038af7244f7a061d3.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "reduce.h"
//{{{void parallel_sum( int *R_d,
/**
* @param R_d Address of element array on device
* @param block_size Number of threads per block
 * @param Rd_size Number of elements in R_d
 * @param n Number of elements each thread handles
*/
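// Each launch of add_unsigned_ints_cuda leaves one partial sum per block in
// gdata[blockIdx.x], so `left` shrinks from Rd_size down to the grid size on
// every pass of the loop below until a single total remains in R_d[0].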
void parallel_sum( unsigned int *R_d,
int block_size,
int Rd_size,
int n)
{
unsigned int left = Rd_size;
while (left > 1) {
int grid_size = ( left + block_size*n - 1) / (block_size * n);
dim3 dimGridR( grid_size);
dim3 dimBlockR( block_size );
size_t sm_size = dimBlockR.x * sizeof(int);
hipLaunchKernelGGL(( add_unsigned_ints_cuda) , dim3(dimGridR), dim3(dimBlockR), sm_size, 0,
R_d, left, n);
hipDeviceSynchronize();
hipError_t err;
err = hipGetLastError();
if(err != hipSuccess)
fprintf(stderr, "My Reduce: %s.\n", hipGetErrorString( err) );
left = dimGridR.x;
}
}
//}}}
//{{{__global__ void my_reduce( int *gdata,
__global__
void add_unsigned_ints_cuda(unsigned int *gdata,
unsigned int size,
unsigned int n )
{
extern __shared__ int sdata[];
/* v1 load: need N threads
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x + tid;
if (i < size)
sdata[tid] = gdata[i];
else
sdata[tid] = 0;
__syncthreads();
*/
/* v2 load: need N/2 threads
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * (2 * blockDim.x) + threadIdx.x;
if (i < size)
sdata[tid] = gdata[i];
else
sdata[tid] = 0;
if (i + blockDim.x < size)
sdata[tid] += gdata[i + blockDim.x];
else
sdata[tid] += 0;
__syncthreads();
*/
/* v3 load: need N/n threads */
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * ( 2 * blockDim.x ) + tid;
unsigned int grid_size = blockDim.x * ( 2 * gridDim.x);
sdata[tid] = 0;
while ( i < (n * grid_size) ) {
if (i < size)
sdata[tid] += gdata[i];
if ( (i + blockDim.x) < size)
sdata[tid] += gdata[i + blockDim.x];
i += grid_size;
}
__syncthreads();
/* v1 calc
unsigned int s;
for (s = 1; s < blockDim.x; s*=2) {
if (tid % (2*s) == 0)
sdata[tid] += sdata[tid + s];
__syncthreads();
}
*/
/* v2 calc
unsigned int s;
for (s = 1; s < blockDim.x; s*=2) {
int index = 2 * s * tid;
if (index < blockDim.x)
sdata[index] += sdata[index + s];
__syncthreads();
}
*/
/* v3 calc */
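    /* v3 uses sequential addressing: the active threads (tid < s) stay
       contiguous, so whole warps retire together (unlike the divergent modulo
       test in v1) and the sdata[tid] / sdata[tid + s] accesses avoid the
       shared-memory bank conflicts of the strided indexing in v2. */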
unsigned int s;
for (s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s)
sdata[tid] += sdata[tid + s];
__syncthreads();
}
/* v5 calc
if (blockDim.x >= 512) {
if (tid < 256)
sdata[tid] += sdata[tid + 256];
__syncthreads();
}
if (blockDim.x >= 256) {
if (tid < 128)
sdata[tid] += sdata[tid + 128];
__syncthreads();
}
if (blockDim.x >= 128) {
if (tid < 64)
sdata[tid] += sdata[tid + 64];
__syncthreads();
}
if (tid < 32) {
if (blockDim.x >= 64)
sdata[tid] += sdata[tid + 32];
if (blockDim.x >= 32)
sdata[tid] += sdata[tid + 16];
if (blockDim.x >= 16)
sdata[tid] += sdata[tid + 8];
if (blockDim.x >= 8)
sdata[tid] += sdata[tid + 4];
if (blockDim.x >= 4)
sdata[tid] += sdata[tid + 2];
if (blockDim.x >= 4)
sdata[tid] += sdata[tid + 2];
if (blockDim.x >= 2)
sdata[tid] += sdata[tid + 1];
} */
if (tid == 0)
gdata[blockIdx.x] = sdata[0];
}
//}}}
| f9637a0d6c5ae706ec4758d038af7244f7a061d3.cu | #include <stdio.h>
#include <cuda.h>
#include "reduce.h"
//{{{void parallel_sum( int *R_d,
/**
* @param R_d Address of element array on device
* @param block_size Number of threads per block
 * @param Rd_size Number of elements in R_d
 * @param n Number of elements each thread handles
*/
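// Each launch of add_unsigned_ints_cuda leaves one partial sum per block in
// gdata[blockIdx.x], so `left` shrinks from Rd_size down to the grid size on
// every pass of the loop below until a single total remains in R_d[0].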
void parallel_sum( unsigned int *R_d,
int block_size,
int Rd_size,
int n)
{
unsigned int left = Rd_size;
while (left > 1) {
int grid_size = ( left + block_size*n - 1) / (block_size * n);
dim3 dimGridR( grid_size);
dim3 dimBlockR( block_size );
size_t sm_size = dimBlockR.x * sizeof(int);
add_unsigned_ints_cuda <<<dimGridR, dimBlockR, sm_size>>>
(R_d, left, n);
cudaThreadSynchronize();
cudaError_t err;
err = cudaGetLastError();
if(err != cudaSuccess)
fprintf(stderr, "My Reduce: %s.\n", cudaGetErrorString( err) );
left = dimGridR.x;
}
}
//}}}
//{{{__global__ void my_reduce( int *gdata,
__global__
void add_unsigned_ints_cuda(unsigned int *gdata,
unsigned int size,
unsigned int n )
{
extern __shared__ int sdata[];
/* v1 load: need N threads
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x + tid;
if (i < size)
sdata[tid] = gdata[i];
else
sdata[tid] = 0;
__syncthreads();
*/
/* v2 load: need N/2 threads
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * (2 * blockDim.x) + threadIdx.x;
if (i < size)
sdata[tid] = gdata[i];
else
sdata[tid] = 0;
if (i + blockDim.x < size)
sdata[tid] += gdata[i + blockDim.x];
else
sdata[tid] += 0;
__syncthreads();
*/
/* v3 load: need N/n threads */
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * ( 2 * blockDim.x ) + tid;
unsigned int grid_size = blockDim.x * ( 2 * gridDim.x);
sdata[tid] = 0;
while ( i < (n * grid_size) ) {
if (i < size)
sdata[tid] += gdata[i];
if ( (i + blockDim.x) < size)
sdata[tid] += gdata[i + blockDim.x];
i += grid_size;
}
__syncthreads();
/* v1 calc
unsigned int s;
for (s = 1; s < blockDim.x; s*=2) {
if (tid % (2*s) == 0)
sdata[tid] += sdata[tid + s];
__syncthreads();
}
*/
/* v2 calc
unsigned int s;
for (s = 1; s < blockDim.x; s*=2) {
int index = 2 * s * tid;
if (index < blockDim.x)
sdata[index] += sdata[index + s];
__syncthreads();
}
*/
/* v3 calc */
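    /* v3 uses sequential addressing: the active threads (tid < s) stay
       contiguous, so whole warps retire together (unlike the divergent modulo
       test in v1) and the sdata[tid] / sdata[tid + s] accesses avoid the
       shared-memory bank conflicts of the strided indexing in v2. */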
unsigned int s;
for (s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s)
sdata[tid] += sdata[tid + s];
__syncthreads();
}
/* v5 calc
if (blockDim.x >= 512) {
if (tid < 256)
sdata[tid] += sdata[tid + 256];
__syncthreads();
}
if (blockDim.x >= 256) {
if (tid < 128)
sdata[tid] += sdata[tid + 128];
__syncthreads();
}
if (blockDim.x >= 128) {
if (tid < 64)
sdata[tid] += sdata[tid + 64];
__syncthreads();
}
if (tid < 32) {
if (blockDim.x >= 64)
sdata[tid] += sdata[tid + 32];
if (blockDim.x >= 32)
sdata[tid] += sdata[tid + 16];
if (blockDim.x >= 16)
sdata[tid] += sdata[tid + 8];
if (blockDim.x >= 8)
sdata[tid] += sdata[tid + 4];
if (blockDim.x >= 4)
sdata[tid] += sdata[tid + 2];
if (blockDim.x >= 4)
sdata[tid] += sdata[tid + 2];
if (blockDim.x >= 2)
sdata[tid] += sdata[tid + 1];
} */
if (tid == 0)
gdata[blockIdx.x] = sdata[0];
}
//}}}
|
5397020077ae88ac6b47c7df86f93e7957004d43.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "plus_offsets.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *coords = NULL;
hipMalloc(&coords, XSIZE*YSIZE);
float *random = NULL;
hipMalloc(&random, XSIZE*YSIZE);
size_t total_size = XSIZE*YSIZE;
float alpha = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((plus_offsets), dim3(gridBlock), dim3(threadBlock), 0, 0, coords, random, total_size, alpha);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((plus_offsets), dim3(gridBlock), dim3(threadBlock), 0, 0, coords, random, total_size, alpha);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((plus_offsets), dim3(gridBlock), dim3(threadBlock), 0, 0, coords, random, total_size, alpha);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 5397020077ae88ac6b47c7df86f93e7957004d43.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "plus_offsets.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *coords = NULL;
cudaMalloc(&coords, XSIZE*YSIZE);
float *random = NULL;
cudaMalloc(&random, XSIZE*YSIZE);
size_t total_size = XSIZE*YSIZE;
float alpha = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
plus_offsets<<<gridBlock,threadBlock>>>(coords,random,total_size,alpha);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
plus_offsets<<<gridBlock,threadBlock>>>(coords,random,total_size,alpha);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
plus_offsets<<<gridBlock,threadBlock>>>(coords,random,total_size,alpha);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
b764647a67ee7997a8cd5147f18d1293a44d861d.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 16, 16, true,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| b764647a67ee7997a8cd5147f18d1293a44d861d.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 16, 16, true,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
4aec6be5d0ef9608da4efba623bb9c9cd7563620.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/DeviceGuard.h>
#include <THH/THH.h>
#include <THH/THHDeviceUtils.cuh>
#include <vector>
#include <iostream>
int const threadsPerBlock = sizeof(unsigned long long) * 8;
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
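// nms_kernel fills dev_mask with one 64-bit word per (box, 64-box column
// block): bit i is set when the row's box overlaps the i-th box of that block
// by more than nms_overlap_thresh. The host loop in nms_cuda then walks the
// boxes in descending score order and keeps a box only if no already-kept box
// has flagged it.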
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, unsigned long long *dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = THCCeilDiv(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
// boxes is a N x 5 tensor
at::Tensor nms_cuda(const at::Tensor boxes, float nms_overlap_thresh) {
// Ensure CUDA uses the input tensor device.
at::DeviceGuard guard(boxes.device());
using scalar_t = float;
AT_ASSERTM(boxes.type().is_cuda(), "boxes must be a CUDA tensor");
auto scores = boxes.select(1, 4);
auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
auto boxes_sorted = boxes.index_select(0, order_t);
int boxes_num = boxes.size(0);
const int col_blocks = THCCeilDiv(boxes_num, threadsPerBlock);
scalar_t* boxes_dev = boxes_sorted.data<scalar_t>();
THCState *state = at::globalContext().lazyInitCUDA(); // TODO replace with getTHCState
unsigned long long* mask_dev = NULL;
//THCudaCheck(THCudaMalloc(state, (void**) &mask_dev,
// boxes_num * col_blocks * sizeof(unsigned long long)));
mask_dev = (unsigned long long*) THCudaMalloc(state, boxes_num * col_blocks * sizeof(unsigned long long));
dim3 blocks(THCCeilDiv(boxes_num, threadsPerBlock),
THCCeilDiv(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
THCudaCheck(hipMemcpy(&mask_host[0],
mask_dev,
sizeof(unsigned long long) * boxes_num * col_blocks,
hipMemcpyDeviceToHost));
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
at::Tensor keep = at::empty({boxes_num}, boxes.options().dtype(at::kLong).device(at::kCPU));
int64_t* keep_out = keep.data<int64_t>();
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
THCudaFree(state, mask_dev);
// TODO improve this part
return std::get<0>(order_t.index({
keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep).to(
order_t.device(), keep.scalar_type())
}).sort(0, false));
}
| 4aec6be5d0ef9608da4efba623bb9c9cd7563620.cu | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/DeviceGuard.h>
#include <THC/THC.h>
#include <THC/THCDeviceUtils.cuh>
#include <vector>
#include <iostream>
int const threadsPerBlock = sizeof(unsigned long long) * 8;
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
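// nms_kernel fills dev_mask with one 64-bit word per (box, 64-box column
// block): bit i is set when the row's box overlaps the i-th box of that block
// by more than nms_overlap_thresh. The host loop in nms_cuda then walks the
// boxes in descending score order and keeps a box only if no already-kept box
// has flagged it.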
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, unsigned long long *dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = THCCeilDiv(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
// boxes is a N x 5 tensor
at::Tensor nms_cuda(const at::Tensor boxes, float nms_overlap_thresh) {
// Ensure CUDA uses the input tensor device.
at::DeviceGuard guard(boxes.device());
using scalar_t = float;
AT_ASSERTM(boxes.type().is_cuda(), "boxes must be a CUDA tensor");
auto scores = boxes.select(1, 4);
auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
auto boxes_sorted = boxes.index_select(0, order_t);
int boxes_num = boxes.size(0);
const int col_blocks = THCCeilDiv(boxes_num, threadsPerBlock);
scalar_t* boxes_dev = boxes_sorted.data<scalar_t>();
THCState *state = at::globalContext().lazyInitCUDA(); // TODO replace with getTHCState
unsigned long long* mask_dev = NULL;
//THCudaCheck(THCudaMalloc(state, (void**) &mask_dev,
// boxes_num * col_blocks * sizeof(unsigned long long)));
mask_dev = (unsigned long long*) THCudaMalloc(state, boxes_num * col_blocks * sizeof(unsigned long long));
dim3 blocks(THCCeilDiv(boxes_num, threadsPerBlock),
THCCeilDiv(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
nms_kernel<<<blocks, threads>>>(boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
THCudaCheck(cudaMemcpy(&mask_host[0],
mask_dev,
sizeof(unsigned long long) * boxes_num * col_blocks,
cudaMemcpyDeviceToHost));
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
at::Tensor keep = at::empty({boxes_num}, boxes.options().dtype(at::kLong).device(at::kCPU));
int64_t* keep_out = keep.data<int64_t>();
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
THCudaFree(state, mask_dev);
// TODO improve this part
return std::get<0>(order_t.index({
keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep).to(
order_t.device(), keep.scalar_type())
}).sort(0, false));
}
|
53ad910136f8f458bdb7d149c8042f979a9f5646.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
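//As a quick check of the weights (they sum to 1.0f): a neutral grey pixel
//such as (128, 128, 128) maps to 128 unchanged, a pure green pixel (0, 255, 0)
//maps to roughly .587f * 255, about 150, and a pure blue pixel (0, 0, 255) to
//only about 29, reflecting the eye's weaker response to blue.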
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
//printf("id: %d ", threadIdx.x + blockIdx.x * blockDim.x);
int index = (threadIdx.x + blockIdx.x * blockDim.x) % (numRows * numCols);
const uchar4* origin = &rgbaImage[index];
greyImage[index] = .299f * origin->x + .587f * origin->y + .114f * origin->z;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const dim3 blockSize((numRows * numCols - 1) / 1024 + 1, 1, 1); //TODO
const dim3 gridSize(1024, 1, 1); //TODO
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(blockSize), dim3(gridSize) , 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
| 53ad910136f8f458bdb7d149c8042f979a9f5646.cu | // Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
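//As a quick check of the weights (they sum to 1.0f): a neutral grey pixel
//such as (128, 128, 128) maps to 128 unchanged, a pure green pixel (0, 255, 0)
//maps to roughly .587f * 255, about 150, and a pure blue pixel (0, 0, 255) to
//only about 29, reflecting the eye's weaker response to blue.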
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
//printf("id: %d ", threadIdx.x + blockIdx.x * blockDim.x);
int index = (threadIdx.x + blockIdx.x * blockDim.x) % (numRows * numCols);
const uchar4* origin = &rgbaImage[index];
greyImage[index] = .299f * origin->x + .587f * origin->y + .114f * origin->z;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const dim3 blockSize((numRows * numCols - 1) / 1024 + 1, 1, 1); //TODO
const dim3 gridSize(1024, 1, 1); //TODO
rgba_to_greyscale<<<blockSize, gridSize >>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
e22847a749297b52db6b360c759e81aea335e583.hip | // !!! This is a file automatically generated by hipify!!!
#include <string>
#include <algorithm>
#include <math.h>
#include <stdio.h>
#include <vector>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#include "circleBoxTest.cu_inl"
#include <thrust/scan.h>
#include <thrust/device_ptr.h>
#include <thrust/device_malloc.h>
#include <thrust/device_free.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
//TODO: not sure if this block is needed
#include <thrust/host_vector.h>
#include <thrust/generate.h>
#include <thrust/random.h>
#include "cudaRenderer.h"
#include "image.h"
#include "noise.h"
#include "sceneLoader.h"
#include "util.h"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
////////////////////////////////////////////////////////////////////////////////////////
// Putting all the cuda kernels here
///////////////////////////////////////////////////////////////////////////////////////
struct GlobalConstants {
SceneName sceneName;
int numCircles;
float* position;
float* velocity;
float* color;
float* radius;
int imageWidth;
int imageHeight;
float invWidth;
float invHeight;
float* imageData;
};
// Global variable that is in scope, but read-only, for all cuda
// kernels. The __constant__ modifier designates this variable will
// be stored in special "constant" memory on the GPU. (we didn't talk
// about this type of memory in class, but constant memory is a fast
// place to put read-only variables).
__constant__ GlobalConstants cuConstRendererParams;
// read-only lookup tables used to quickly compute noise (needed by
// advanceAnimation for the snowflake scene)
__constant__ int cuConstNoiseYPermutationTable[256];
__constant__ int cuConstNoiseXPermutationTable[256];
__constant__ float cuConstNoise1DValueTable[256];
// color ramp table needed for the color ramp lookup shader
#define COLOR_MAP_SIZE 5
__constant__ float cuConstColorRamp[COLOR_MAP_SIZE][3];
// including parts of the CUDA code from external files to keep this
// file simpler and to separate code that should not be modified
#include "noiseCuda.cu_inl"
#include "lookupColor.cu_inl"
// kernelClearImageSnowflake -- (CUDA device code)
//
// Clear the image, setting the image to the white-gray gradation that
// is used in the snowflake image
__global__ void kernelClearImageSnowflake() {
int imageX = blockIdx.x * blockDim.x + threadIdx.x;
int imageY = blockIdx.y * blockDim.y + threadIdx.y;
int width = cuConstRendererParams.imageWidth;
int height = cuConstRendererParams.imageHeight;
if (imageX >= width || imageY >= height)
return;
int offset = 4 * (imageY * width + imageX);
float shade = .4f + .45f * static_cast<float>(height-imageY) / height;
float4 value = make_float4(shade, shade, shade, 1.f);
// write to global memory: As an optimization, I use a float4
// store, that results in more efficient code than if I coded this
    // up as four separate fp32 stores.
*(float4*)(&cuConstRendererParams.imageData[offset]) = value;
}
// kernelClearImage -- (CUDA device code)
//
// Clear the image, setting all pixels to the specified color rgba
__global__ void kernelClearImage(float r, float g, float b, float a) {
int imageX = blockIdx.x * blockDim.x + threadIdx.x;
int imageY = blockIdx.y * blockDim.y + threadIdx.y;
int width = cuConstRendererParams.imageWidth;
int height = cuConstRendererParams.imageHeight;
if (imageX >= width || imageY >= height)
return;
int offset = 4 * (imageY * width + imageX);
float4 value = make_float4(r, g, b, a);
// write to global memory: As an optimization, I use a float4
// store, that results in more efficient code than if I coded this
    // up as four separate fp32 stores.
*(float4*)(&cuConstRendererParams.imageData[offset]) = value;
}
// kernelAdvanceFireWorks
//
// Update the position of the fireworks (if circle is firework)
__global__ void kernelAdvanceFireWorks() {
const float dt = 1.f / 60.f;
const float pi = 3.14159;
const float maxDist = 0.25f;
float* velocity = cuConstRendererParams.velocity;
float* position = cuConstRendererParams.position;
float* radius = cuConstRendererParams.radius;
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
if (0 <= index && index < NUM_FIREWORKS) { // firework center; no update
return;
}
// determine the fire-work center/spark indices
int fIdx = (index - NUM_FIREWORKS) / NUM_SPARKS;
int sfIdx = (index - NUM_FIREWORKS) % NUM_SPARKS;
int index3i = 3 * fIdx;
int sIdx = NUM_FIREWORKS + fIdx * NUM_SPARKS + sfIdx;
int index3j = 3 * sIdx;
float cx = position[index3i];
float cy = position[index3i+1];
// update position
position[index3j] += velocity[index3j] * dt;
position[index3j+1] += velocity[index3j+1] * dt;
// fire-work sparks
float sx = position[index3j];
float sy = position[index3j+1];
// compute vector from firework-spark
float cxsx = sx - cx;
float cysy = sy - cy;
// compute distance from fire-work
float dist = sqrt(cxsx * cxsx + cysy * cysy);
if (dist > maxDist) { // restore to starting position
// random starting position on fire-work's rim
float angle = (sfIdx * 2 * pi)/NUM_SPARKS;
float sinA = sin(angle);
float cosA = cos(angle);
float x = cosA * radius[fIdx];
float y = sinA * radius[fIdx];
position[index3j] = position[index3i] + x;
position[index3j+1] = position[index3i+1] + y;
position[index3j+2] = 0.0f;
// travel scaled unit length
velocity[index3j] = cosA/5.0;
velocity[index3j+1] = sinA/5.0;
velocity[index3j+2] = 0.0f;
}
}
// kernelAdvanceHypnosis
//
// Update the radius/color of the circles
__global__ void kernelAdvanceHypnosis() {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
float* radius = cuConstRendererParams.radius;
float cutOff = 0.5f;
    // place circle back in center after reaching threshold radius
if (radius[index] > cutOff) {
radius[index] = 0.02f;
} else {
radius[index] += 0.01f;
}
}
// kernelAdvanceBouncingBalls
//
// Update the position of the balls
__global__ void kernelAdvanceBouncingBalls() {
const float dt = 1.f / 60.f;
const float kGravity = -2.8f; // sorry Newton
const float kDragCoeff = -0.8f;
const float epsilon = 0.001f;
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
float* velocity = cuConstRendererParams.velocity;
float* position = cuConstRendererParams.position;
int index3 = 3 * index;
// reverse velocity if center position < 0
float oldVelocity = velocity[index3+1];
float oldPosition = position[index3+1];
if (oldVelocity == 0.f && oldPosition == 0.f) { // stop-condition
return;
}
if (position[index3+1] < 0 && oldVelocity < 0.f) { // bounce ball
velocity[index3+1] *= kDragCoeff;
}
// update velocity: v = u + at (only along y-axis)
velocity[index3+1] += kGravity * dt;
// update positions (only along y-axis)
position[index3+1] += velocity[index3+1] * dt;
if (fabsf(velocity[index3+1] - oldVelocity) < epsilon
&& oldPosition < 0.0f
&& fabsf(position[index3+1]-oldPosition) < epsilon) { // stop ball
velocity[index3+1] = 0.f;
position[index3+1] = 0.f;
}
}
// kernelAdvanceSnowflake -- (CUDA device code)
//
// move the snowflake animation forward one time step. Updates circle
// positions and velocities. Note how the position of the snowflake
// is reset if it moves off the left, right, or bottom of the screen.
__global__ void kernelAdvanceSnowflake() {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
const float dt = 1.f / 60.f;
const float kGravity = -1.8f; // sorry Newton
const float kDragCoeff = 2.f;
int index3 = 3 * index;
float* positionPtr = &cuConstRendererParams.position[index3];
float* velocityPtr = &cuConstRendererParams.velocity[index3];
// loads from global memory
float3 position = *((float3*)positionPtr);
float3 velocity = *((float3*)velocityPtr);
// hack to make farther circles move more slowly, giving the
// illusion of parallax
float forceScaling = fmin(fmax(1.f - position.z, .1f), 1.f); // clamp
// add some noise to the motion to make the snow flutter
float3 noiseInput;
noiseInput.x = 10.f * position.x;
noiseInput.y = 10.f * position.y;
noiseInput.z = 255.f * position.z;
float2 noiseForce = cudaVec2CellNoise(noiseInput, index);
noiseForce.x *= 7.5f;
noiseForce.y *= 5.f;
// drag
float2 dragForce;
dragForce.x = -1.f * kDragCoeff * velocity.x;
dragForce.y = -1.f * kDragCoeff * velocity.y;
// update positions
position.x += velocity.x * dt;
position.y += velocity.y * dt;
// update velocities
velocity.x += forceScaling * (noiseForce.x + dragForce.y) * dt;
velocity.y += forceScaling * (kGravity + noiseForce.y + dragForce.y) * dt;
float radius = cuConstRendererParams.radius[index];
// if the snowflake has moved off the left, right or bottom of
// the screen, place it back at the top and give it a
// pseudorandom x position and velocity.
if ( (position.y + radius < 0.f) ||
(position.x + radius) < -0.f ||
(position.x - radius) > 1.f)
{
noiseInput.x = 255.f * position.x;
noiseInput.y = 255.f * position.y;
noiseInput.z = 255.f * position.z;
noiseForce = cudaVec2CellNoise(noiseInput, index);
position.x = .5f + .5f * noiseForce.x;
position.y = 1.35f + radius;
// restart from 0 vertical velocity. Choose a
// pseudo-random horizontal velocity.
velocity.x = 2.f * noiseForce.y;
velocity.y = 0.f;
}
// store updated positions and velocities to global memory
*((float3*)positionPtr) = position;
*((float3*)velocityPtr) = velocity;
}
// shadePixel -- (CUDA device code)
//
// given a pixel and a circle, determines the contribution to the
// pixel from the circle. Update of the image is done in this
// function. Called by kernelRenderCircles()
__device__ __inline__ void
shadePixel(int circleIndex, float2 pixelCenter, float3 p, float& redPix, float& greenPix, float& bluePix, float& alphaPix) {
//shadePixel(int circleIndex, float2 pixelCenter, float3 p, float4* imagePtr) {
float diffX = p.x - pixelCenter.x;
float diffY = p.y - pixelCenter.y;
float pixelDist = diffX * diffX + diffY * diffY;
float rad = cuConstRendererParams.radius[circleIndex];;
float maxDist = rad * rad;
// circle does not contribute to the image
if (pixelDist > maxDist)
return;
float3 rgb;
float alpha;
// there is a non-zero contribution. Now compute the shading value
// This conditional is in the inner loop, but it evaluates the
    // same direction for all threads so its cost is not so
// bad. Attempting to hoist this conditional is not a required
// student optimization in Assignment 2
if (cuConstRendererParams.sceneName == SNOWFLAKES || cuConstRendererParams.sceneName == SNOWFLAKES_SINGLE_FRAME) {
const float kCircleMaxAlpha = .5f;
const float falloffScale = 4.f;
float normPixelDist = sqrt(pixelDist) / rad;
rgb = lookupColor(normPixelDist);
float maxAlpha = .6f + .4f * (1.f-p.z);
maxAlpha = kCircleMaxAlpha * fmaxf(fminf(maxAlpha, 1.f), 0.f); // kCircleMaxAlpha * clamped value
alpha = maxAlpha * exp(-1.f * falloffScale * normPixelDist * normPixelDist);
} else {
// simple: each circle has an assigned color
int index3 = 3 * circleIndex;
rgb = *(float3*)&(cuConstRendererParams.color[index3]);
alpha = .5f;
}
float oneMinusAlpha = 1.f - alpha;
// BEGIN SHOULD-BE-ATOMIC REGION
// global memory read
//TODO: why in 2 steps -- is it to avoid some hazard???!!
/*
float4 existingColor = *imagePtr;
float4 newColor;
newColor.x = alpha * rgb.x + oneMinusAlpha * existingColor.x;
newColor.y = alpha * rgb.y + oneMinusAlpha * existingColor.y;
newColor.z = alpha * rgb.z + oneMinusAlpha * existingColor.z;
newColor.w = alpha + existingColor.w;
// global memory write
*imagePtr = newColor;
*/
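    // In this version the caller (kernelRenderCircles) keeps the running pixel
    // color in registers and writes it back to global memory once per pixel,
    // so the blend below only updates the redPix/greenPix/bluePix/alphaPix
    // references instead of doing a global read-modify-write per circle.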
redPix = alpha * rgb.x + oneMinusAlpha * redPix;
greenPix = alpha * rgb.y + oneMinusAlpha * greenPix;
bluePix = alpha * rgb.z + oneMinusAlpha * bluePix;
alphaPix = alpha + alphaPix;
// END SHOULD-BE-ATOMIC REGION
}
// kernelRenderCircles -- (CUDA device code)
//
// Each thread renders a circle. Since there is no protection to
// ensure order of update or mutual exclusion on the output image, the
// resulting image will be incorrect.
/*__global__ void kernelRenderCircles(int* circleImgBlockList, int* circleStartAddr) {
float invWidth = cuConstRendererParams.invWidth;
float invHeight = cuConstRendererParams.invHeight;
int imageWidth = cuConstRendererParams.imageWidth;
int imageHeight = cuConstRendererParams.imageHeight;
//TODO: convert short to int
//TODO: can direct get width from const params
float invWidth = cuConstRendererParams.invWidth;
float invHeight = cuConstRendererParams.invHeight;
//Read data from imgPtr to shared memory
extern __shared__ float shared_imgData[];
int x = blockIdx.x*(imageWidth/gridDim.x) + threadIdx.x;
int y = blockIdx.y*(imageHeight/gridDim.y) + threadIdx.y;
for (int k = 0 ; k < 4; k ++) {
shared_imgData[4 * (threadIdx.y*(imageWidth/gridDim.x) + threadIdx.x) + k] = cuConstRendererParams.imageData[(4 * (y * imageWidth + x)) + k];
}
__syncthreads();
int start_addr = circleStartAddr[blockIdx.y*gridDim.x + blockIdx.x];
int end_addr= circleStartAddr[blockIdx.y*gridDim.x + blockIdx.x + 1];
for (int arrIdx = start_addr; arrIdx < end_addr; arrIdx++) {
int index = circleImgBlockList[arrIdx] - 1;
int index3 = 3 * index;
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
// const unsigned int offset = blockIdx.x*blockDim.x + threadIdx.x;
float rad = cuConstRendererParams.radius[index];
// BlockDim = 256 x1, gridDim = 4x4
float4* shared_imgPtr = (float4*)(&shared_imgData[4 * (threadIdx.y * (imageWidth/gridDim.x) + threadIdx.x)]);
float2 pixelCenterNorm = make_float2(invWidth * (static_cast<float>(x) + 0.5f),
invHeight * (static_cast<float>(y) + 0.5f));
shadePixel(index, pixelCenterNorm, p, shared_imgPtr);
}
__syncthreads();
//Write data to global memory from shared memory
float4* imgPtr = (float4*)(&cuConstRendererParams.imageData[4 * (y * imageWidth + x)]);
float r = shared_imgData[4 * (threadIdx.y*(imageWidth/gridDim.x) + threadIdx.x)];
float g = shared_imgData[4 * (threadIdx.y*(imageWidth/gridDim.x) + threadIdx.x) + 1];
float b = shared_imgData[4 * (threadIdx.y*(imageWidth/gridDim.x) + threadIdx.x) + 2];
float a = shared_imgData[4 * (threadIdx.y*(imageWidth/gridDim.x) + threadIdx.x) + 3];
*imgPtr = make_float4(r,g,b,a);
}
*/
__global__ void kernelRenderCircles(int* circleImgBlockList, int* circleStartAddr) {
const int sharedSize = 1024;
//const int sharedSize2 = 1024 * 3;
const int totalThreads = blockDim.x * blockDim.y;
__shared__ int sharedData[sharedSize];
__shared__ float3 sharedData1[sharedSize];
float invWidth = cuConstRendererParams.invWidth;
float invHeight = cuConstRendererParams.invHeight;
int imageWidth = cuConstRendererParams.imageWidth;
int imageHeight = cuConstRendererParams.imageHeight;
int start_addr = circleStartAddr[blockIdx.y*gridDim.x + blockIdx.x];
int end_addr= circleStartAddr[blockIdx.y*gridDim.x + blockIdx.x + 1];
int sharedCirclePairs = end_addr - start_addr;
int data_per_thread;
int sharedDataOverhead = 0;
int index1;
float3 p;
if(sharedCirclePairs<sharedSize)
data_per_thread = (end_addr-start_addr + totalThreads-1)/totalThreads;
else{
data_per_thread = (sharedSize+totalThreads-1)/totalThreads;
sharedDataOverhead = 1;
}
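// Stage this block's slice of circleImgBlockList (and the corresponding
// circle centers) into shared memory; if the slice is longer than sharedSize,
// the tail stays in global memory and sharedDataOverhead flags the fallback.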
for (int i = 0; i < data_per_thread; i++) {
    int tid = threadIdx.y * blockDim.x + threadIdx.x;   // linear thread id within the block
    int elem = i + data_per_thread * tid;
    // bound by the element index (not the thread id) so we never read past the
    // block's circle list or past the shared staging buffers
    if (elem < sharedCirclePairs && elem < sharedSize) {
        sharedData[elem] = circleImgBlockList[start_addr + elem];
        index1 = 3 * (sharedData[elem] - 1);            // stored IDs are circle index + 1
        sharedData1[elem] = *(float3*)(&cuConstRendererParams.position[index1]);
    }
}
__syncthreads();
printf("all thread reach here:\n");
if(sharedCirclePairs){
for(int tid_x = threadIdx.x ; tid_x < (imageWidth/gridDim.x); tid_x +=blockDim.x) {
for(int tid_y = threadIdx.y ; tid_y < (imageHeight/gridDim.y); tid_y +=blockDim.y) {
int x = blockIdx.x*(imageWidth/gridDim.x) + tid_x;
int y = blockIdx.y*(imageHeight/gridDim.y) + tid_y;
float red_pixel = cuConstRendererParams.imageData[(4 * (y * imageWidth + x))];
float green_pixel = cuConstRendererParams.imageData[(4 * (y * imageWidth + x)) + 1];
float blue_pixel = cuConstRendererParams.imageData[(4 * (y * imageWidth + x)) + 2];
float alpha_pixel = cuConstRendererParams.imageData[(4 * (y * imageWidth + x)) + 3];
float2 pixelCenterNorm = make_float2(invWidth * (static_cast<float>(x) + 0.5f),
invHeight * (static_cast<float>(y) + 0.5f));
//k*# of pixels, added with linear thread ID
//Unrolled the k loop to avoid loop overhead
int index ;
for (int arrIdx = start_addr; arrIdx < end_addr; arrIdx++) {
if (sharedDataOverhead && ((arrIdx - start_addr) >= sharedSize))   // >= : entry sharedSize is past the staging buffer
    index = circleImgBlockList[arrIdx] - 1;
else
    index = sharedData[arrIdx - start_addr] - 1;
int index3 = 3 * index;
if (sharedDataOverhead && ((arrIdx - start_addr) >= sharedSize))
    p = *(float3*)(&cuConstRendererParams.position[index3]);
else
    p = sharedData1[arrIdx - start_addr];
//float rad = cuConstRendererParams.radius[index];
shadePixel(index, pixelCenterNorm, p, red_pixel, green_pixel, blue_pixel, alpha_pixel);
}
__syncthreads();
cuConstRendererParams.imageData[4 * (y * imageWidth + x)] = red_pixel;
cuConstRendererParams.imageData[4 * (y * imageWidth + x) + 1] = green_pixel;
cuConstRendererParams.imageData[4 * (y * imageWidth + x) + 2 ] = blue_pixel;
cuConstRendererParams.imageData[4 * (y * imageWidth + x) + 3 ] = alpha_pixel;
}
}
}
}
/*__global__ void kernelRenderCircles(int* circleImgBlockList, int* circleStartAddr) {
float invWidth = cuConstRendererParams.invWidth;
float invHeight = cuConstRendererParams.invHeight;
int imageWidth = cuConstRendererParams.imageWidth;
int imageHeight = cuConstRendererParams.imageHeight;
int start_addr = circleStartAddr[blockIdx.y*gridDim.x + blockIdx.x];
int end_addr= circleStartAddr[blockIdx.y*gridDim.x + blockIdx.x + 1];
for (int arrIdx = start_addr; arrIdx < end_addr; arrIdx++) {
int index = circleImgBlockList[arrIdx]-1;
//if(threadIdx.x + threadIdx.y == 0) {
// printf("blockid = %d, index = %d\n", (blockIdx.y*gridDim.x + blockIdx.x), index);
//}
int index3 = 3 * index;
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
// const unsigned int offset = blockIdx.x*blockDim.x + threadIdx.x;
// BlockDim = 256 x1, gridDim = 4x4
for(int tid_x = threadIdx.x ; tid_x < (imageWidth/gridDim.x); tid_x +=blockDim.x) {
for(int tid_y = threadIdx.y ; tid_y < (imageHeight/gridDim.y); tid_y +=blockDim.y) {
int x = blockIdx.x*(imageWidth/gridDim.x) + tid_x;
int y = blockIdx.y*(imageHeight/gridDim.y) + tid_y;
float4* imgPtr = (float4*)(&cuConstRendererParams.imageData[4 * (y * imageWidth + x)]);
float2 pixelCenterNorm = make_float2(invWidth * (static_cast<float>(x) + 0.5f),
invHeight * (static_cast<float>(y) + 0.5f));
shadePixel(index, pixelCenterNorm, p, imgPtr);
}
}
}
}*/
////////////////////////////////////////////////////////////////////////////////////////
CudaRenderer::CudaRenderer() {
image = NULL;
numCircles = 0;
position = NULL;
velocity = NULL;
color = NULL;
radius = NULL;
cudaDevicePosition = NULL;
cudaDeviceVelocity = NULL;
cudaDeviceColor = NULL;
cudaDeviceRadius = NULL;
cudaDeviceImageData = NULL;
}
CudaRenderer::~CudaRenderer() {
if (image) {
delete image;
}
if (position) {
delete [] position;
delete [] velocity;
delete [] color;
delete [] radius;
}
if (cudaDevicePosition) {
hipFree(cudaDevicePosition);
hipFree(cudaDeviceVelocity);
hipFree(cudaDeviceColor);
hipFree(cudaDeviceRadius);
hipFree(cudaDeviceImageData);
}
}
const Image*
CudaRenderer::getImage() {
// need to copy contents of the rendered image from device memory
// before we expose the Image object to the caller
printf("Copying image data from device\n");
hipMemcpy(image->data,
cudaDeviceImageData,
sizeof(float) * 4 * image->width * image->height,
hipMemcpyDeviceToHost);
return image;
}
void
CudaRenderer::loadScene(SceneName scene) {
sceneName = scene;
loadCircleScene(sceneName, numCircles, position, velocity, color, radius);
}
void
CudaRenderer::setup() {
int deviceCount = 0;
std::string name;
hipError_t err = hipGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Initializing CUDA for CudaRenderer\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
hipDeviceProp_t deviceProps;
hipGetDeviceProperties(&deviceProps, i);
name = deviceProps.name;
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
}
printf("---------------------------------------------------------\n");
// By this time the scene should be loaded. Now copy all the key
// data structures into device memory so they are accessible to
// CUDA kernels
//
// See the CUDA Programmer's Guide for descriptions of
// hipMalloc and hipMemcpy
hipMalloc(&cudaDevicePosition, sizeof(float) * 3 * numCircles);
hipMalloc(&cudaDeviceVelocity, sizeof(float) * 3 * numCircles);
hipMalloc(&cudaDeviceColor, sizeof(float) * 3 * numCircles);
hipMalloc(&cudaDeviceRadius, sizeof(float) * numCircles);
hipMalloc(&cudaDeviceImageData, sizeof(float) * 4 * image->width * image->height);
hipMemcpy(cudaDevicePosition, position, sizeof(float) * 3 * numCircles, hipMemcpyHostToDevice);
hipMemcpy(cudaDeviceVelocity, velocity, sizeof(float) * 3 * numCircles, hipMemcpyHostToDevice);
hipMemcpy(cudaDeviceColor, color, sizeof(float) * 3 * numCircles, hipMemcpyHostToDevice);
hipMemcpy(cudaDeviceRadius, radius, sizeof(float) * numCircles, hipMemcpyHostToDevice);
// Initialize parameters in constant memory. We didn't talk about
// constant memory in class, but the use of read-only constant
// memory here is an optimization over just sticking these values
// in device global memory. NVIDIA GPUs have a few special tricks
// for optimizing access to constant memory. Using global memory
// here would have worked just as well. See the Programmer's
// Guide for more information about constant memory.
GlobalConstants params;
params.sceneName = sceneName;
params.numCircles = numCircles;
params.imageWidth = image->width;
params.imageHeight = image->height;
params.invWidth = 1.f / image->width;
params.invHeight = 1.f / image->height;
params.position = cudaDevicePosition;
params.velocity = cudaDeviceVelocity;
params.color = cudaDeviceColor;
params.radius = cudaDeviceRadius;
params.imageData = cudaDeviceImageData;
hipMemcpyToSymbol(cuConstRendererParams, ¶ms, sizeof(GlobalConstants));
// also need to copy over the noise lookup tables, so we can
// implement noise on the GPU
int* permX;
int* permY;
float* value1D;
getNoiseTables(&permX, &permY, &value1D);
hipMemcpyToSymbol(cuConstNoiseXPermutationTable, permX, sizeof(int) * 256);
hipMemcpyToSymbol(cuConstNoiseYPermutationTable, permY, sizeof(int) * 256);
hipMemcpyToSymbol(cuConstNoise1DValueTable, value1D, sizeof(float) * 256);
// last, copy over the color table that's used by the shading
// function for circles in the snowflake demo
float lookupTable[COLOR_MAP_SIZE][3] = {
{1.f, 1.f, 1.f},
{1.f, 1.f, 1.f},
{.8f, .9f, 1.f},
{.8f, .9f, 1.f},
{.8f, 0.8f, 1.f},
};
hipMemcpyToSymbol(cuConstColorRamp, lookupTable, sizeof(float) * 3 * COLOR_MAP_SIZE);
}
// allocOutputImage --
//
// Allocate buffer the renderer will render into. Check status of
// image first to avoid memory leak.
void
CudaRenderer::allocOutputImage(int width, int height) {
if (image)
delete image;
image = new Image(width, height);
}
// clearImage --
//
// Clears the renderer's target image. The state of the image after
// the clear depends on the scene being rendered.
void
CudaRenderer::clearImage() {
// 256 threads per block is a healthy number
dim3 blockDim(16, 16, 1);
dim3 gridDim(
(image->width + blockDim.x - 1) / blockDim.x,
(image->height + blockDim.y - 1) / blockDim.y);
if (sceneName == SNOWFLAKES || sceneName == SNOWFLAKES_SINGLE_FRAME) {
hipLaunchKernelGGL(( kernelClearImageSnowflake), dim3(gridDim), dim3(blockDim), 0, 0, );
} else {
hipLaunchKernelGGL(( kernelClearImage), dim3(gridDim), dim3(blockDim), 0, 0, 1.f, 1.f, 1.f, 1.f);
}
hipDeviceSynchronize();
}
// advanceAnimation --
//
// Advance the simulation one time step. Updates all circle positions
// and velocities
void
CudaRenderer::advanceAnimation() {
// 256 threads per block is a healthy number
dim3 blockDim(256, 1);
dim3 gridDim((numCircles + blockDim.x - 1) / blockDim.x);
// only the snowflake scene has animation
if (sceneName == SNOWFLAKES) {
hipLaunchKernelGGL(( kernelAdvanceSnowflake), dim3(gridDim), dim3(blockDim), 0, 0, );
} else if (sceneName == BOUNCING_BALLS) {
hipLaunchKernelGGL(( kernelAdvanceBouncingBalls), dim3(gridDim), dim3(blockDim), 0, 0, );
} else if (sceneName == HYPNOSIS) {
hipLaunchKernelGGL(( kernelAdvanceHypnosis), dim3(gridDim), dim3(blockDim), 0, 0, );
} else if (sceneName == FIREWORKS) {
hipLaunchKernelGGL(( kernelAdvanceFireWorks), dim3(gridDim), dim3(blockDim), 0, 0, );
}
hipDeviceSynchronize();
}
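// make_circleImgBlockArray -- (CUDA device code)
//
// One thread per circle. The circle's conservative screen-space bounding box
// is mapped onto the grid of image blocks; every (block, circle) slot it may
// touch is set to 1 in circleImgBlockArray and to circle index + 1 in
// circleImgBlockId (0 means "no overlap", which is what the copy_if
// compaction in render() relies on).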
__global__ void make_circleImgBlockArray(int *circleImgBlockArray, int *circleImgBlockId, int imgBlockWidth, int imgBlockNum) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
int index3 = 3 * index;
//printf("Index : %d\n", index);
// read position and radius
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
float rad = cuConstRendererParams.radius[index];
// compute the bounding box of the circle. The bound is in integer
// screen coordinates, so it's clamped to the edges of the screen.
short imageWidth = cuConstRendererParams.imageWidth;
short imageHeight = cuConstRendererParams.imageHeight;
short minX = static_cast<short>(imageWidth * (p.x - rad));
short maxX = static_cast<short>(imageWidth * (p.x + rad)) + 1;
short minY = static_cast<short>(imageHeight * (p.y - rad));
short maxY = static_cast<short>(imageHeight * (p.y + rad)) + 1;
// a bunch of clamps. Is there a CUDA built-in for this?
short screenMinX = (minX > 0) ? ((minX < imageWidth) ? minX : imageWidth) : 0;
short screenMaxX = (maxX > 0) ? ((maxX < imageWidth) ? maxX : imageWidth) : 0;
short screenMinY = (minY > 0) ? ((minY < imageHeight) ? minY : imageHeight) : 0;
short screenMaxY = (maxY > 0) ? ((maxY < imageHeight) ? maxY : imageHeight) : 0;
/*
printf("MinX = %d\n",screenMinX/imgBlockWidth);
printf("MaxX = %d\n",screenMaxX/imgBlockWidth);
printf("MinY = %d\n",screenMinY/imgBlockWidth);
printf("MaxY = %d\n",screenMaxY/imgBlockWidth);
*/
for (short x = (screenMinX/imgBlockWidth); x <= (screenMaxX/imgBlockWidth); x++) {
for (short y = (screenMinY/imgBlockWidth); y <= (screenMaxY/imgBlockWidth); y++) {
if((x == imgBlockNum) || (y == imgBlockNum)) { continue;}
circleImgBlockArray[(y*imgBlockNum + x) *(cuConstRendererParams.numCircles) + index] = 1;
circleImgBlockId[(y*imgBlockNum + x) *(cuConstRendererParams.numCircles) + index] = index+1;
//printf("Index = %d %d %d\n", x, y, index);
//printf("HERE!!!!\n");
}
}
}
__global__ void print_kernel(int length, int* input) {
printf("HERE\n");
for(int i=0; i< length; i++) {
printf("input[%d] = %d\n", i, input[i]);
}
}
__global__ void compare_array(int length, int* array1, int* array2) {
for(int i=0; i< length; i++) {
if(array1[i] != array2[i]) {
printf("Arrays don't match. Expected = %d, Got = %d\n", array1[i], array2[i]);
}
}
}
__global__ void getRefCircleArray(int* refCircleImgArray) {
for (int index = 0; index < cuConstRendererParams.numCircles; index++) {
int index3 = 3 * index;
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
float rad = cuConstRendererParams.radius[index];
// BlockDim = 256 x1, gridDim = 4x4
int circleInBox = circleInBoxConservative(p.x, p.y, rad,
static_cast<float>(1.f/gridDim.x)*blockIdx.x, static_cast<float>(1.f/gridDim.x)*(blockIdx.x+1),
static_cast<float>(1.f/gridDim.y)*(blockIdx.y+1), static_cast<float>(1.f/gridDim.y)*(blockIdx.y));
//printf("ID: %d\n" , index + (blockIdx.x + blockIdx.y*gridDim.x)*cuConstRendererParams.numCircles);
refCircleImgArray[index + (blockIdx.x + blockIdx.y*gridDim.x)*cuConstRendererParams.numCircles] = circleInBox;
}
}
//predicate functor
template <typename T>
struct is_not_zero : public thrust::unary_function<T,bool>
{
__host__ __device__
bool operator()(T x)
{
return (x != 0);
}
};
// convert a linear index to a row index
template <typename T>
struct linear_index_to_row_index : public thrust::unary_function<T,T>
{
T C; // number of columns
__host__ __device__
linear_index_to_row_index(T C) : C(C) {}
__host__ __device__
T operator()(T i)
{
return i / C;
}
};
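// linear_index_to_row_index(numCircles) maps a flat position in the
// numImgBlocks x numCircles matrix to its image-block row, letting the
// reduce_by_key call in render() sum each block's overlap flags, while
// is_not_zero is the copy_if predicate that compacts the nonzero circle IDs.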
__global__ void kernelRenderSmallCircles() {
float invWidth = cuConstRendererParams.invWidth;
float invHeight = cuConstRendererParams.invHeight;
int imageWidth = cuConstRendererParams.imageWidth;
int imageHeight = cuConstRendererParams.imageHeight;
int x = blockIdx.x*(imageWidth/gridDim.x) + threadIdx.x;
int y = blockIdx.y*(imageHeight/gridDim.y) + threadIdx.y;
float red_pixel = cuConstRendererParams.imageData[(4 * (y * imageWidth + x))];
float green_pixel = cuConstRendererParams.imageData[(4 * (y * imageWidth + x)) + 1];
float blue_pixel = cuConstRendererParams.imageData[(4 * (y * imageWidth + x)) + 2];
float alpha_pixel = cuConstRendererParams.imageData[(4 * (y * imageWidth + x)) + 3];
//TODO: is it converted to registers?? ---> I think its best if I pass by reference??
//float4 imgPtr = *(float4*)(&cuConstRendererParams.imageData[4 * (y * imageWidth + x)]);
__syncthreads(); //to make sure this shared memory is visible to everyone --> can remove this as the syncthreads below will take care of it
for (int index = 0; index < cuConstRendererParams.numCircles; index++) {
int index3 = 3 * index;
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
float rad = cuConstRendererParams.radius[index];
// BlockDim = 256 x1, gridDim = 4x4
//__shared__ int circleInBox;
//if(threadIdx.x + threadIdx.y == 0) {
int circleInBox = circleInBoxConservative(p.x, p.y, rad,
static_cast<float>(1.f/gridDim.x)*blockIdx.x, static_cast<float>(1.f/gridDim.x)*(blockIdx.x+1),
static_cast<float>(1.f/gridDim.y)*(blockIdx.y+1), static_cast<float>(1.f/gridDim.y)*(blockIdx.y));
//}
//__syncthreads(); //TODO: is this even needed? --- but why?
if(circleInBox == 0) { continue; }
float2 pixelCenterNorm = make_float2(invWidth * (static_cast<float>(x) + 0.5f),
invHeight * (static_cast<float>(y) + 0.5f));
shadePixel(index, pixelCenterNorm, p, red_pixel, green_pixel, blue_pixel, alpha_pixel);
}
__syncthreads();
cuConstRendererParams.imageData[4 * (y * imageWidth + x)] = red_pixel;
cuConstRendererParams.imageData[4 * (y * imageWidth + x) + 1] = green_pixel;
cuConstRendererParams.imageData[4 * (y * imageWidth + x) + 2 ] = blue_pixel;
cuConstRendererParams.imageData[4 * (y * imageWidth + x) + 3 ] = alpha_pixel;
}
void
CudaRenderer::render() {
// 256 threads per block is a healthy number
//dim3 blockDim(256, 1);
//dim3 gridDim((numCircles + blockDim.x - 1) / blockDim.x);
// compute the bounding box of the circle. The bound is in integer
// screen coordinates, so it's clamped to the edges of the screen.
short imageWidth = image->width;
/*
short imageHeight = image->height;
dim3 blockDim(16, 16);
// dim3 gridDim((numPixels + blockDim.x - 1) / blockDim.x);
//int numPixels = imageWidth * imageHeight;
int temp1 = (imageWidth + blockDim.x - 1) / blockDim.x;
int temp2 = (imageHeight + blockDim.y - 1) / blockDim.y;
dim3 gridDim(temp1,temp2); //dividing it into block -- each block working on a portion of image
//NumPixels per block
//int numPixelsPerBlock = blockDim.x * blockDim.y * 4;
*/
int* circleImgBlockArray = NULL;
int* circleImgBlockId = NULL;
//printf("NumCircles = %d\n",numCircles);
if (numCircles < 5) {
// compute the bounding box of the circle. The bound is in integer
// screen coordinates, so it's clamped to the edges of the screen.
short imageWidth = image->width;
short imageHeight = image->height;
dim3 blockDim(16, 16);
// dim3 gridDim((numPixels + blockDim.x - 1) / blockDim.x);
//int numPixels = imageWidth * imageHeight;
int temp1 = (imageWidth + blockDim.x - 1) / blockDim.x;
int temp2 = (imageHeight + blockDim.y - 1) / blockDim.y;
dim3 gridDim(temp1,temp2); //dividing it into block -- each block working on a portion of image
//NumPixels per block
//int numPixelsPerBlock = blockDim.x * blockDim.y * 4;
//TODO: why does perf tank with more kernels --- what's the trade off?
hipLaunchKernelGGL(( kernelRenderSmallCircles), dim3(gridDim), dim3(blockDim), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
} else {
int imgBlockNum =16;
//if(numCircles < 5) {
// imgBlockNum = 1;
//} else if(numCircles < 100) {
// imgBlockNum = 2;
//} else if(numCircles < 1000) {
// imgBlockNum = 4;
//} else if (numCircles < 10000) {
// imgBlockNum = 8;
//} else {
// imgBlockNum = 32;
//}
int numImgBlocks = imgBlockNum * imgBlockNum;
int numElements = numCircles * imgBlockNum * imgBlockNum;
hipMalloc(&circleImgBlockArray, sizeof(int) * numElements);
hipMalloc(&circleImgBlockId, sizeof(int) * numElements);
// zero both arrays: hipMalloc does not initialize memory, and the kernel below
// only writes the slots whose circle/block pair actually overlaps
hipMemset(circleImgBlockArray, 0, sizeof(int) * numElements);
hipMemset(circleImgBlockId, 0, sizeof(int) * numElements);
//gpuErrchk(hipDeviceSynchronize());
dim3 blockDim(256, 1);
dim3 gridDim((numCircles + blockDim.x - 1) / blockDim.x);
hipLaunchKernelGGL(( make_circleImgBlockArray), dim3(gridDim), dim3(blockDim), 0, 0, circleImgBlockArray,circleImgBlockId,imageWidth/imgBlockNum, imgBlockNum);
/*Convert the 2D circle block array into 1 D array by removing 0 values */
thrust::device_ptr<int> thrust_arr = thrust::device_pointer_cast(circleImgBlockArray);
thrust::device_ptr<int> thrust_circleid = thrust::device_pointer_cast(circleImgBlockId);
//thrust::device_vector<int> prefix_sum(num_img_blocks);
//Gets the number circles per each block
//This is used to generate the starting address of each array
//int *reduced = NULL;
//reduced = (int*) malloc(sizeof(int)*(numImgBlocks+1));
// allocate storage for row sums and indices
thrust::device_vector<int> row_sums(numImgBlocks+1);
thrust::device_vector<int> row_indices(numImgBlocks);
// compute row sums by summing values with equal row indices
thrust::reduce_by_key
(thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(numCircles)),
thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(numCircles)) + numElements,
thrust_arr,
row_indices.begin(),
row_sums.begin(),
thrust::equal_to<int>(),
thrust::plus<int>());
thrust::fill(thrust::device, row_sums.end() - 1, row_sums.end(), 0);
//thrust::copy(row_sums.begin(), row_sums.end(), std::ostream_iterator<int>(std::cout, " "));
//TODO CHECK: Are these matching? If yes, what is more performant?
//for(int i=0; i< numImgBlocks; i++) {
// printf("Row_sums[%d] = %d\n", i, row_sums[i]);
//}
/*
for(int i=0; i<numImgBlocks; i++) {
reduced[i] = thrust::reduce(thrust_arr+numCircles*i, thrust_arr + numCircles*(i+1));
// printf("Reduced[%d] %d\n", i, reduced[i]);
}
reduced[numImgBlocks] = 0;
//Run exclusive scan to get starting address of each array
thrust::device_vector<int> circleStartAddr(numImgBlocks+1);
thrust::device_ptr<int> reduced_gpu = thrust::device_malloc<int>(numImgBlocks+1);
hipMemcpy(reduced_gpu.get(), reduced, (numImgBlocks + 1) * sizeof(int),
hipMemcpyHostToDevice);
thrust::exclusive_scan(reduced_gpu, reduced_gpu+numImgBlocks+1, circleStartAddr.begin());
//thrust::copy(circleStartAddr.begin(), circleStartAddr.end(), std::ostream_iterator<float>(std::cout, " "));
*/
thrust::device_vector<int> circleStartAddr(numImgBlocks+1);
thrust::exclusive_scan(row_sums.begin(), row_sums.end(), circleStartAddr.begin());
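// circleStartAddr[b] now holds the offset of image block b's sub-list in the
// compacted pair list, and circleStartAddr[numImgBlocks] holds the total pair
// count (the trailing 0 written into row_sums makes the exclusive scan yield
// it), e.g. row_sums = [3, 0, 2, ..., 0] scans to circleStartAddr = [0, 3, 3, 5, ...].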
//thrust::copy(circleStartAddr.begin(), circleStartAddr.end(), std::ostream_iterator<float>(std::cout, " "));
//TODO CHECK: get a sum of reduced and compare that with num pairs to confirm its all correct!
//TODO CHECK: which one is more performant? -- choose that
int num_pairs = thrust::reduce(thrust_arr, thrust_arr + numElements);
//int num_pairs = circleStartAddr[numImgBlocks];
//printf("SUM = %d\n", num_pairs);
hipFree(circleImgBlockArray);
//thrust::device_vector<int> prefix_sum(num_img_blocks);
//kernelRenderCircles<<<gridDim, blockDim>>>();
// gpuErrchk(hipDeviceSynchronize());
//allocate the right size of array
//This array will be traversed by each block -- by using starting address from circleStartAddr
thrust::device_vector<int> circleImgBlockList(num_pairs);
thrust::copy_if(thrust_circleid, thrust_circleid + numElements, circleImgBlockList.begin(), is_not_zero<int>());
hipFree(circleImgBlockId);
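// circleImgBlockList is the concatenation of every image block's circle list:
// block b owns entries [circleStartAddr[b], circleStartAddr[b+1]), each entry
// being a circle index + 1 that kernelRenderCircles turns back into an index.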
//thrust::copy(circleImgBlockList.begin(), circleImgBlockList.end(), std::ostream_iterator<float>(std::cout, " "));
//TODO: can use cuda streams to parallelize these I think...
/*
int* refCircleImgArray = NULL;
hipMalloc(&refCircleImgArray, sizeof(int) * numCircles * imgBlockNum * imgBlockNum);
dim3 gridDim2(imgBlockNum, imgBlockNum);
getRefCircleArray<<<gridDim2, 1>>>(refCircleImgArray);
gpuErrchk(hipDeviceSynchronize());
compare_array<<<1,1>>>(numCircles * imgBlockNum * imgBlockNum, refCircleImgArray, circleImgBlockArray);
*/
//print_kernel<<<1,1>>>(numCircles * imgBlockNum * imgBlockNum, circleImgBlockArray);
//TODO: Need to free all these data structures that I am creating!!
dim3 gridDim3(imgBlockNum, imgBlockNum);
dim3 blockDim3(16, 16);
//TODO: convert to shared memory version??
//kernelRenderCircles<<<gridDim, blockDim,numPixelsPerBlock*sizeof(float)>>>(imageWidth, imageHeight);
int *deviceStartAddr = NULL;
deviceStartAddr = thrust::raw_pointer_cast(circleStartAddr.data());
int *deviceImgBlockList = NULL;
deviceImgBlockList = thrust::raw_pointer_cast(circleImgBlockList.data());
//int numPixelsPerBlock = blockDim.x * blockDim.y * 4;
hipLaunchKernelGGL(( kernelRenderCircles), dim3(gridDim3), dim3(blockDim3), 0, 0, deviceImgBlockList, deviceStartAddr);
//kernelRenderCircles<<<gridDim3, blockDim3>>>(circleImgBlockList, deviceStartAddr);
gpuErrchk(hipDeviceSynchronize());
}
}
| e22847a749297b52db6b360c759e81aea335e583.cu | #include <string>
#include <algorithm>
#include <math.h>
#include <stdio.h>
#include <vector>
#include <cuda.h>
#include <cuda_runtime.h>
#include <driver_functions.h>
#include "circleBoxTest.cu_inl"
#include <thrust/scan.h>
#include <thrust/device_ptr.h>
#include <thrust/device_malloc.h>
#include <thrust/device_free.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
//TODO: not sure if this block is needed
#include <thrust/host_vector.h>
#include <thrust/generate.h>
#include <thrust/random.h>
#include "cudaRenderer.h"
#include "image.h"
#include "noise.h"
#include "sceneLoader.h"
#include "util.h"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
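// usage: wrap CUDA runtime calls whose failure should abort the run,
// e.g. gpuErrchk(cudaDeviceSynchronize()); as done after the kernel launches below.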
////////////////////////////////////////////////////////////////////////////////////////
// Putting all the cuda kernels here
///////////////////////////////////////////////////////////////////////////////////////
struct GlobalConstants {
SceneName sceneName;
int numCircles;
float* position;
float* velocity;
float* color;
float* radius;
int imageWidth;
int imageHeight;
float invWidth;
float invHeight;
float* imageData;
};
// Global variable that is in scope, but read-only, for all cuda
// kernels. The __constant__ modifier designates this variable will
// be stored in special "constant" memory on the GPU. (we didn't talk
// about this type of memory in class, but constant memory is a fast
// place to put read-only variables).
__constant__ GlobalConstants cuConstRendererParams;
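// The struct is filled in from the host in CudaRenderer::setup() via
// cudaMemcpyToSymbol(cuConstRendererParams, &params, sizeof(GlobalConstants)).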
// read-only lookup tables used to quickly compute noise (needed by
// advanceAnimation for the snowflake scene)
__constant__ int cuConstNoiseYPermutationTable[256];
__constant__ int cuConstNoiseXPermutationTable[256];
__constant__ float cuConstNoise1DValueTable[256];
// color ramp table needed for the color ramp lookup shader
#define COLOR_MAP_SIZE 5
__constant__ float cuConstColorRamp[COLOR_MAP_SIZE][3];
// including parts of the CUDA code from external files to keep this
// file simpler and to separate code that should not be modified
#include "noiseCuda.cu_inl"
#include "lookupColor.cu_inl"
// kernelClearImageSnowflake -- (CUDA device code)
//
// Clear the image, setting the image to the white-gray gradation that
// is used in the snowflake image
__global__ void kernelClearImageSnowflake() {
int imageX = blockIdx.x * blockDim.x + threadIdx.x;
int imageY = blockIdx.y * blockDim.y + threadIdx.y;
int width = cuConstRendererParams.imageWidth;
int height = cuConstRendererParams.imageHeight;
if (imageX >= width || imageY >= height)
return;
int offset = 4 * (imageY * width + imageX);
float shade = .4f + .45f * static_cast<float>(height-imageY) / height;
float4 value = make_float4(shade, shade, shade, 1.f);
// write to global memory: As an optimization, I use a float4
// store, that results in more efficient code than if I coded this
// up as four separate fp32 stores.
*(float4*)(&cuConstRendererParams.imageData[offset]) = value;
}
// kernelClearImage -- (CUDA device code)
//
// Clear the image, setting all pixels to the specified color rgba
__global__ void kernelClearImage(float r, float g, float b, float a) {
int imageX = blockIdx.x * blockDim.x + threadIdx.x;
int imageY = blockIdx.y * blockDim.y + threadIdx.y;
int width = cuConstRendererParams.imageWidth;
int height = cuConstRendererParams.imageHeight;
if (imageX >= width || imageY >= height)
return;
int offset = 4 * (imageY * width + imageX);
float4 value = make_float4(r, g, b, a);
// write to global memory: As an optimization, I use a float4
// store, that results in more efficient code than if I coded this
// up as four separate fp32 stores.
*(float4*)(&cuConstRendererParams.imageData[offset]) = value;
}
// kernelAdvanceFireWorks
//
// Update the position of the fireworks (if circle is firework)
__global__ void kernelAdvanceFireWorks() {
const float dt = 1.f / 60.f;
const float pi = 3.14159;
const float maxDist = 0.25f;
float* velocity = cuConstRendererParams.velocity;
float* position = cuConstRendererParams.position;
float* radius = cuConstRendererParams.radius;
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
if (0 <= index && index < NUM_FIREWORKS) { // firework center; no update
return;
}
// determine the fire-work center/spark indices
int fIdx = (index - NUM_FIREWORKS) / NUM_SPARKS;
int sfIdx = (index - NUM_FIREWORKS) % NUM_SPARKS;
int index3i = 3 * fIdx;
int sIdx = NUM_FIREWORKS + fIdx * NUM_SPARKS + sfIdx;
int index3j = 3 * sIdx;
float cx = position[index3i];
float cy = position[index3i+1];
// update position
position[index3j] += velocity[index3j] * dt;
position[index3j+1] += velocity[index3j+1] * dt;
// fire-work sparks
float sx = position[index3j];
float sy = position[index3j+1];
// compute vector from firework-spark
float cxsx = sx - cx;
float cysy = sy - cy;
// compute distance from fire-work
float dist = sqrt(cxsx * cxsx + cysy * cysy);
if (dist > maxDist) { // restore to starting position
// random starting position on fire-work's rim
float angle = (sfIdx * 2 * pi)/NUM_SPARKS;
float sinA = sin(angle);
float cosA = cos(angle);
float x = cosA * radius[fIdx];
float y = sinA * radius[fIdx];
position[index3j] = position[index3i] + x;
position[index3j+1] = position[index3i+1] + y;
position[index3j+2] = 0.0f;
// travel scaled unit length
velocity[index3j] = cosA/5.0;
velocity[index3j+1] = sinA/5.0;
velocity[index3j+2] = 0.0f;
}
}
// kernelAdvanceHypnosis
//
// Update the radius/color of the circles
__global__ void kernelAdvanceHypnosis() {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
float* radius = cuConstRendererParams.radius;
float cutOff = 0.5f;
// place circle back in center after reaching threshold radius
if (radius[index] > cutOff) {
radius[index] = 0.02f;
} else {
radius[index] += 0.01f;
}
}
// kernelAdvanceBouncingBalls
//
// Update the position of the balls
__global__ void kernelAdvanceBouncingBalls() {
const float dt = 1.f / 60.f;
const float kGravity = -2.8f; // sorry Newton
const float kDragCoeff = -0.8f;
const float epsilon = 0.001f;
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
float* velocity = cuConstRendererParams.velocity;
float* position = cuConstRendererParams.position;
int index3 = 3 * index;
// reverse velocity if center position < 0
float oldVelocity = velocity[index3+1];
float oldPosition = position[index3+1];
if (oldVelocity == 0.f && oldPosition == 0.f) { // stop-condition
return;
}
if (position[index3+1] < 0 && oldVelocity < 0.f) { // bounce ball
velocity[index3+1] *= kDragCoeff;
}
// update velocity: v = u + at (only along y-axis)
velocity[index3+1] += kGravity * dt;
// update positions (only along y-axis)
position[index3+1] += velocity[index3+1] * dt;
if (fabsf(velocity[index3+1] - oldVelocity) < epsilon
&& oldPosition < 0.0f
&& fabsf(position[index3+1]-oldPosition) < epsilon) { // stop ball
velocity[index3+1] = 0.f;
position[index3+1] = 0.f;
}
}
// kernelAdvanceSnowflake -- (CUDA device code)
//
// move the snowflake animation forward one time step. Updates circle
// positions and velocities. Note how the position of the snowflake
// is reset if it moves off the left, right, or bottom of the screen.
__global__ void kernelAdvanceSnowflake() {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
const float dt = 1.f / 60.f;
const float kGravity = -1.8f; // sorry Newton
const float kDragCoeff = 2.f;
int index3 = 3 * index;
float* positionPtr = &cuConstRendererParams.position[index3];
float* velocityPtr = &cuConstRendererParams.velocity[index3];
// loads from global memory
float3 position = *((float3*)positionPtr);
float3 velocity = *((float3*)velocityPtr);
// hack to make farther circles move more slowly, giving the
// illusion of parallax
float forceScaling = fmin(fmax(1.f - position.z, .1f), 1.f); // clamp
// add some noise to the motion to make the snow flutter
float3 noiseInput;
noiseInput.x = 10.f * position.x;
noiseInput.y = 10.f * position.y;
noiseInput.z = 255.f * position.z;
float2 noiseForce = cudaVec2CellNoise(noiseInput, index);
noiseForce.x *= 7.5f;
noiseForce.y *= 5.f;
// drag
float2 dragForce;
dragForce.x = -1.f * kDragCoeff * velocity.x;
dragForce.y = -1.f * kDragCoeff * velocity.y;
// update positions
position.x += velocity.x * dt;
position.y += velocity.y * dt;
// update velocities
velocity.x += forceScaling * (noiseForce.x + dragForce.y) * dt;
velocity.y += forceScaling * (kGravity + noiseForce.y + dragForce.y) * dt;
float radius = cuConstRendererParams.radius[index];
// if the snowflake has moved off the left, right or bottom of
// the screen, place it back at the top and give it a
// pseudorandom x position and velocity.
if ( (position.y + radius < 0.f) ||
(position.x + radius) < -0.f ||
(position.x - radius) > 1.f)
{
noiseInput.x = 255.f * position.x;
noiseInput.y = 255.f * position.y;
noiseInput.z = 255.f * position.z;
noiseForce = cudaVec2CellNoise(noiseInput, index);
position.x = .5f + .5f * noiseForce.x;
position.y = 1.35f + radius;
// restart from 0 vertical velocity. Choose a
// pseudo-random horizontal velocity.
velocity.x = 2.f * noiseForce.y;
velocity.y = 0.f;
}
// store updated positions and velocities to global memory
*((float3*)positionPtr) = position;
*((float3*)velocityPtr) = velocity;
}
// shadePixel -- (CUDA device code)
//
// given a pixel and a circle, determines the contribution to the
// pixel from the circle. Update of the image is done in this
// function. Called by kernelRenderCircles()
__device__ __inline__ void
shadePixel(int circleIndex, float2 pixelCenter, float3 p, float& redPix, float& greenPix, float& bluePix, float& alphaPix) {
//shadePixel(int circleIndex, float2 pixelCenter, float3 p, float4* imagePtr) {
float diffX = p.x - pixelCenter.x;
float diffY = p.y - pixelCenter.y;
float pixelDist = diffX * diffX + diffY * diffY;
float rad = cuConstRendererParams.radius[circleIndex];
float maxDist = rad * rad;
// circle does not contribute to the image
if (pixelDist > maxDist)
return;
float3 rgb;
float alpha;
// there is a non-zero contribution. Now compute the shading value
// This conditional is in the inner loop, but it evaluates the
// same direction for all threads so its cost is not so
// bad. Attempting to hoist this conditional is not a required
// student optimization in Assignment 2
if (cuConstRendererParams.sceneName == SNOWFLAKES || cuConstRendererParams.sceneName == SNOWFLAKES_SINGLE_FRAME) {
const float kCircleMaxAlpha = .5f;
const float falloffScale = 4.f;
float normPixelDist = sqrt(pixelDist) / rad;
rgb = lookupColor(normPixelDist);
float maxAlpha = .6f + .4f * (1.f-p.z);
maxAlpha = kCircleMaxAlpha * fmaxf(fminf(maxAlpha, 1.f), 0.f); // kCircleMaxAlpha * clamped value
alpha = maxAlpha * exp(-1.f * falloffScale * normPixelDist * normPixelDist);
} else {
// simple: each circle has an assigned color
int index3 = 3 * circleIndex;
rgb = *(float3*)&(cuConstRendererParams.color[index3]);
alpha = .5f;
}
float oneMinusAlpha = 1.f - alpha;
// BEGIN SHOULD-BE-ATOMIC REGION
// global memory read
//TODO: why in 2 steps -- is it to avoid some hazard???!!
/*
float4 existingColor = *imagePtr;
float4 newColor;
newColor.x = alpha * rgb.x + oneMinusAlpha * existingColor.x;
newColor.y = alpha * rgb.y + oneMinusAlpha * existingColor.y;
newColor.z = alpha * rgb.z + oneMinusAlpha * existingColor.z;
newColor.w = alpha + existingColor.w;
// global memory write
*imagePtr = newColor;
*/
redPix = alpha * rgb.x + oneMinusAlpha * redPix;
greenPix = alpha * rgb.y + oneMinusAlpha * greenPix;
bluePix = alpha * rgb.z + oneMinusAlpha * bluePix;
alphaPix = alpha + alphaPix;
// END SHOULD-BE-ATOMIC REGION
}
// kernelRenderCircles -- (CUDA device code)
//
// Each thread block renders one tile of the output image, shading only the
// circles recorded for that tile in circleImgBlockList. The commented-out
// variant below stages the tile's pixels in shared memory.
/*__global__ void kernelRenderCircles(int* circleImgBlockList, int* circleStartAddr) {
float invWidth = cuConstRendererParams.invWidth;
float invHeight = cuConstRendererParams.invHeight;
int imageWidth = cuConstRendererParams.imageWidth;
int imageHeight = cuConstRendererParams.imageHeight;
//TODO: convert short to int
//TODO: can direct get width from const params
float invWidth = cuConstRendererParams.invWidth;
float invHeight = cuConstRendererParams.invHeight;
//Read data from imgPtr to shared memory
extern __shared__ float shared_imgData[];
int x = blockIdx.x*(imageWidth/gridDim.x) + threadIdx.x;
int y = blockIdx.y*(imageHeight/gridDim.y) + threadIdx.y;
for (int k = 0 ; k < 4; k ++) {
shared_imgData[4 * (threadIdx.y*(imageWidth/gridDim.x) + threadIdx.x) + k] = cuConstRendererParams.imageData[(4 * (y * imageWidth + x)) + k];
}
__syncthreads();
int start_addr = circleStartAddr[blockIdx.y*gridDim.x + blockIdx.x];
int end_addr= circleStartAddr[blockIdx.y*gridDim.x + blockIdx.x + 1];
for (int arrIdx = start_addr; arrIdx < end_addr; arrIdx++) {
int index = circleImgBlockList[arrIdx] - 1;
int index3 = 3 * index;
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
// const unsigned int offset = blockIdx.x*blockDim.x + threadIdx.x;
float rad = cuConstRendererParams.radius[index];
// BlockDim = 256 x1, gridDim = 4x4
float4* shared_imgPtr = (float4*)(&shared_imgData[4 * (threadIdx.y * (imageWidth/gridDim.x) + threadIdx.x)]);
float2 pixelCenterNorm = make_float2(invWidth * (static_cast<float>(x) + 0.5f),
invHeight * (static_cast<float>(y) + 0.5f));
shadePixel(index, pixelCenterNorm, p, shared_imgPtr);
}
__syncthreads();
//Write data to global memory from shared memory
float4* imgPtr = (float4*)(&cuConstRendererParams.imageData[4 * (y * imageWidth + x)]);
float r = shared_imgData[4 * (threadIdx.y*(imageWidth/gridDim.x) + threadIdx.x)];
float g = shared_imgData[4 * (threadIdx.y*(imageWidth/gridDim.x) + threadIdx.x) + 1];
float b = shared_imgData[4 * (threadIdx.y*(imageWidth/gridDim.x) + threadIdx.x) + 2];
float a = shared_imgData[4 * (threadIdx.y*(imageWidth/gridDim.x) + threadIdx.x) + 3];
*imgPtr = make_float4(r,g,b,a);
}
*/
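// kernelRenderCircles -- (CUDA device code)
//
// Active version: one thread block per image tile. The block first stages up
// to sharedSize entries of its circle list (IDs stored as circle index + 1)
// and the matching circle centers in shared memory, then each thread walks the
// whole list for every pixel it owns, falling back to global memory for
// entries beyond the staging buffer, and composites the circles in list order.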
__global__ void kernelRenderCircles(int* circleImgBlockList, int* circleStartAddr) {
const int sharedSize = 1024;
//const int sharedSize2 = 1024 * 3;
const int totalThreads = blockDim.x * blockDim.y;
__shared__ int sharedData[sharedSize];
__shared__ float3 sharedData1[sharedSize];
float invWidth = cuConstRendererParams.invWidth;
float invHeight = cuConstRendererParams.invHeight;
int imageWidth = cuConstRendererParams.imageWidth;
int imageHeight = cuConstRendererParams.imageHeight;
int start_addr = circleStartAddr[blockIdx.y*gridDim.x + blockIdx.x];
int end_addr= circleStartAddr[blockIdx.y*gridDim.x + blockIdx.x + 1];
int sharedCirclePairs = end_addr - start_addr;
int data_per_thread;
int sharedDataOverhead = 0;
int index1;
float3 p;
if(sharedCirclePairs<sharedSize)
data_per_thread = (end_addr-start_addr + totalThreads-1)/totalThreads;
else{
data_per_thread = (sharedSize+totalThreads-1)/totalThreads;
sharedDataOverhead = 1;
}
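// Stage this block's slice of circleImgBlockList (and the corresponding
// circle centers) into shared memory; if the slice is longer than sharedSize,
// the tail stays in global memory and sharedDataOverhead flags the fallback.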
for (int i = 0; i < data_per_thread; i++) {
    int tid = threadIdx.y * blockDim.x + threadIdx.x;   // linear thread id within the block
    int elem = i + data_per_thread * tid;
    // bound by the element index (not the thread id) so we never read past the
    // block's circle list or past the shared staging buffers
    if (elem < sharedCirclePairs && elem < sharedSize) {
        sharedData[elem] = circleImgBlockList[start_addr + elem];
        index1 = 3 * (sharedData[elem] - 1);            // stored IDs are circle index + 1
        sharedData1[elem] = *(float3*)(&cuConstRendererParams.position[index1]);
    }
}
__syncthreads();
printf("all thread reach here:\n");
if(sharedCirclePairs){
for(int tid_x = threadIdx.x ; tid_x < (imageWidth/gridDim.x); tid_x +=blockDim.x) {
for(int tid_y = threadIdx.y ; tid_y < (imageHeight/gridDim.y); tid_y +=blockDim.y) {
int x = blockIdx.x*(imageWidth/gridDim.x) + tid_x;
int y = blockIdx.y*(imageHeight/gridDim.y) + tid_y;
float red_pixel = cuConstRendererParams.imageData[(4 * (y * imageWidth + x))];
float green_pixel = cuConstRendererParams.imageData[(4 * (y * imageWidth + x)) + 1];
float blue_pixel = cuConstRendererParams.imageData[(4 * (y * imageWidth + x)) + 2];
float alpha_pixel = cuConstRendererParams.imageData[(4 * (y * imageWidth + x)) + 3];
float2 pixelCenterNorm = make_float2(invWidth * (static_cast<float>(x) + 0.5f),
invHeight * (static_cast<float>(y) + 0.5f));
//k*# of pixels, added with linear thread ID
//Unrolled the k loop to avoid loop overhead
int index ;
for (int arrIdx = start_addr; arrIdx < end_addr; arrIdx++) {
if (sharedDataOverhead && ((arrIdx - start_addr) >= sharedSize))   // >= : entry sharedSize is past the staging buffer
    index = circleImgBlockList[arrIdx] - 1;
else
    index = sharedData[arrIdx - start_addr] - 1;
int index3 = 3 * index;
if (sharedDataOverhead && ((arrIdx - start_addr) >= sharedSize))
    p = *(float3*)(&cuConstRendererParams.position[index3]);
else
    p = sharedData1[arrIdx - start_addr];
//float rad = cuConstRendererParams.radius[index];
shadePixel(index, pixelCenterNorm, p, red_pixel, green_pixel, blue_pixel, alpha_pixel);
}
__syncthreads();
cuConstRendererParams.imageData[4 * (y * imageWidth + x)] = red_pixel;
cuConstRendererParams.imageData[4 * (y * imageWidth + x) + 1] = green_pixel;
cuConstRendererParams.imageData[4 * (y * imageWidth + x) + 2 ] = blue_pixel;
cuConstRendererParams.imageData[4 * (y * imageWidth + x) + 3 ] = alpha_pixel;
}
}
}
}
/*__global__ void kernelRenderCircles(int* circleImgBlockList, int* circleStartAddr) {
float invWidth = cuConstRendererParams.invWidth;
float invHeight = cuConstRendererParams.invHeight;
int imageWidth = cuConstRendererParams.imageWidth;
int imageHeight = cuConstRendererParams.imageHeight;
int start_addr = circleStartAddr[blockIdx.y*gridDim.x + blockIdx.x];
int end_addr= circleStartAddr[blockIdx.y*gridDim.x + blockIdx.x + 1];
for (int arrIdx = start_addr; arrIdx < end_addr; arrIdx++) {
int index = circleImgBlockList[arrIdx]-1;
//if(threadIdx.x + threadIdx.y == 0) {
// printf("blockid = %d, index = %d\n", (blockIdx.y*gridDim.x + blockIdx.x), index);
//}
int index3 = 3 * index;
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
// const unsigned int offset = blockIdx.x*blockDim.x + threadIdx.x;
// BlockDim = 256 x1, gridDim = 4x4
for(int tid_x = threadIdx.x ; tid_x < (imageWidth/gridDim.x); tid_x +=blockDim.x) {
for(int tid_y = threadIdx.y ; tid_y < (imageHeight/gridDim.y); tid_y +=blockDim.y) {
int x = blockIdx.x*(imageWidth/gridDim.x) + tid_x;
int y = blockIdx.y*(imageHeight/gridDim.y) + tid_y;
float4* imgPtr = (float4*)(&cuConstRendererParams.imageData[4 * (y * imageWidth + x)]);
float2 pixelCenterNorm = make_float2(invWidth * (static_cast<float>(x) + 0.5f),
invHeight * (static_cast<float>(y) + 0.5f));
shadePixel(index, pixelCenterNorm, p, imgPtr);
}
}
}
}*/
////////////////////////////////////////////////////////////////////////////////////////
CudaRenderer::CudaRenderer() {
image = NULL;
numCircles = 0;
position = NULL;
velocity = NULL;
color = NULL;
radius = NULL;
cudaDevicePosition = NULL;
cudaDeviceVelocity = NULL;
cudaDeviceColor = NULL;
cudaDeviceRadius = NULL;
cudaDeviceImageData = NULL;
}
CudaRenderer::~CudaRenderer() {
if (image) {
delete image;
}
if (position) {
delete [] position;
delete [] velocity;
delete [] color;
delete [] radius;
}
if (cudaDevicePosition) {
cudaFree(cudaDevicePosition);
cudaFree(cudaDeviceVelocity);
cudaFree(cudaDeviceColor);
cudaFree(cudaDeviceRadius);
cudaFree(cudaDeviceImageData);
}
}
const Image*
CudaRenderer::getImage() {
// need to copy contents of the rendered image from device memory
// before we expose the Image object to the caller
printf("Copying image data from device\n");
cudaMemcpy(image->data,
cudaDeviceImageData,
sizeof(float) * 4 * image->width * image->height,
cudaMemcpyDeviceToHost);
return image;
}
void
CudaRenderer::loadScene(SceneName scene) {
sceneName = scene;
loadCircleScene(sceneName, numCircles, position, velocity, color, radius);
}
void
CudaRenderer::setup() {
int deviceCount = 0;
std::string name;
cudaError_t err = cudaGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Initializing CUDA for CudaRenderer\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
cudaDeviceProp deviceProps;
cudaGetDeviceProperties(&deviceProps, i);
name = deviceProps.name;
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
}
printf("---------------------------------------------------------\n");
// By this time the scene should be loaded. Now copy all the key
// data structures into device memory so they are accessible to
// CUDA kernels
//
// See the CUDA Programmer's Guide for descriptions of
// cudaMalloc and cudaMemcpy
cudaMalloc(&cudaDevicePosition, sizeof(float) * 3 * numCircles);
cudaMalloc(&cudaDeviceVelocity, sizeof(float) * 3 * numCircles);
cudaMalloc(&cudaDeviceColor, sizeof(float) * 3 * numCircles);
cudaMalloc(&cudaDeviceRadius, sizeof(float) * numCircles);
cudaMalloc(&cudaDeviceImageData, sizeof(float) * 4 * image->width * image->height);
cudaMemcpy(cudaDevicePosition, position, sizeof(float) * 3 * numCircles, cudaMemcpyHostToDevice);
cudaMemcpy(cudaDeviceVelocity, velocity, sizeof(float) * 3 * numCircles, cudaMemcpyHostToDevice);
cudaMemcpy(cudaDeviceColor, color, sizeof(float) * 3 * numCircles, cudaMemcpyHostToDevice);
cudaMemcpy(cudaDeviceRadius, radius, sizeof(float) * numCircles, cudaMemcpyHostToDevice);
// Initialize parameters in constant memory. We didn't talk about
// constant memory in class, but the use of read-only constant
// memory here is an optimization over just sticking these values
// in device global memory. NVIDIA GPUs have a few special tricks
// for optimizing access to constant memory. Using global memory
// here would have worked just as well. See the Programmer's
// Guide for more information about constant memory.
GlobalConstants params;
params.sceneName = sceneName;
params.numCircles = numCircles;
params.imageWidth = image->width;
params.imageHeight = image->height;
params.invWidth = 1.f / image->width;
params.invHeight = 1.f / image->height;
params.position = cudaDevicePosition;
params.velocity = cudaDeviceVelocity;
params.color = cudaDeviceColor;
params.radius = cudaDeviceRadius;
params.imageData = cudaDeviceImageData;
cudaMemcpyToSymbol(cuConstRendererParams, ¶ms, sizeof(GlobalConstants));
// also need to copy over the noise lookup tables, so we can
// implement noise on the GPU
int* permX;
int* permY;
float* value1D;
getNoiseTables(&permX, &permY, &value1D);
cudaMemcpyToSymbol(cuConstNoiseXPermutationTable, permX, sizeof(int) * 256);
cudaMemcpyToSymbol(cuConstNoiseYPermutationTable, permY, sizeof(int) * 256);
cudaMemcpyToSymbol(cuConstNoise1DValueTable, value1D, sizeof(float) * 256);
// last, copy over the color table that's used by the shading
// function for circles in the snowflake demo
float lookupTable[COLOR_MAP_SIZE][3] = {
{1.f, 1.f, 1.f},
{1.f, 1.f, 1.f},
{.8f, .9f, 1.f},
{.8f, .9f, 1.f},
{.8f, 0.8f, 1.f},
};
cudaMemcpyToSymbol(cuConstColorRamp, lookupTable, sizeof(float) * 3 * COLOR_MAP_SIZE);
}
// allocOutputImage --
//
// Allocate buffer the renderer will render into. Check status of
// image first to avoid memory leak.
void
CudaRenderer::allocOutputImage(int width, int height) {
if (image)
delete image;
image = new Image(width, height);
}
// clearImage --
//
// Clears the renderer's target image. The state of the image after
// the clear depends on the scene being rendered.
void
CudaRenderer::clearImage() {
// 256 threads per block is a healthy number
dim3 blockDim(16, 16, 1);
dim3 gridDim(
(image->width + blockDim.x - 1) / blockDim.x,
(image->height + blockDim.y - 1) / blockDim.y);
if (sceneName == SNOWFLAKES || sceneName == SNOWFLAKES_SINGLE_FRAME) {
kernelClearImageSnowflake<<<gridDim, blockDim>>>();
} else {
kernelClearImage<<<gridDim, blockDim>>>(1.f, 1.f, 1.f, 1.f);
}
cudaDeviceSynchronize();
}
// advanceAnimation --
//
// Advance the simulation one time step. Updates all circle positions
// and velocities
void
CudaRenderer::advanceAnimation() {
// 256 threads per block is a healthy number
dim3 blockDim(256, 1);
dim3 gridDim((numCircles + blockDim.x - 1) / blockDim.x);
// only the snowflake scene has animation
if (sceneName == SNOWFLAKES) {
kernelAdvanceSnowflake<<<gridDim, blockDim>>>();
} else if (sceneName == BOUNCING_BALLS) {
kernelAdvanceBouncingBalls<<<gridDim, blockDim>>>();
} else if (sceneName == HYPNOSIS) {
kernelAdvanceHypnosis<<<gridDim, blockDim>>>();
} else if (sceneName == FIREWORKS) {
kernelAdvanceFireWorks<<<gridDim, blockDim>>>();
}
cudaDeviceSynchronize();
}
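// make_circleImgBlockArray -- (CUDA device code)
//
// One thread per circle. The circle's conservative screen-space bounding box
// is mapped onto the grid of image blocks; every (block, circle) slot it may
// touch is set to 1 in circleImgBlockArray and to circle index + 1 in
// circleImgBlockId (0 means "no overlap", which is what the copy_if
// compaction in render() relies on).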
__global__ void make_circleImgBlockArray(int *circleImgBlockArray, int *circleImgBlockId, int imgBlockWidth, int imgBlockNum) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= cuConstRendererParams.numCircles)
return;
int index3 = 3 * index;
//printf("Index : %d\n", index);
// read position and radius
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
float rad = cuConstRendererParams.radius[index];
// compute the bounding box of the circle. The bound is in integer
// screen coordinates, so it's clamped to the edges of the screen.
short imageWidth = cuConstRendererParams.imageWidth;
short imageHeight = cuConstRendererParams.imageHeight;
short minX = static_cast<short>(imageWidth * (p.x - rad));
short maxX = static_cast<short>(imageWidth * (p.x + rad)) + 1;
short minY = static_cast<short>(imageHeight * (p.y - rad));
short maxY = static_cast<short>(imageHeight * (p.y + rad)) + 1;
// a bunch of clamps. Is there a CUDA built-in for this?
short screenMinX = (minX > 0) ? ((minX < imageWidth) ? minX : imageWidth) : 0;
short screenMaxX = (maxX > 0) ? ((maxX < imageWidth) ? maxX : imageWidth) : 0;
short screenMinY = (minY > 0) ? ((minY < imageHeight) ? minY : imageHeight) : 0;
short screenMaxY = (maxY > 0) ? ((maxY < imageHeight) ? maxY : imageHeight) : 0;
/*
printf("MinX = %d\n",screenMinX/imgBlockWidth);
printf("MaxX = %d\n",screenMaxX/imgBlockWidth);
printf("MinY = %d\n",screenMinY/imgBlockWidth);
printf("MaxY = %d\n",screenMaxY/imgBlockWidth);
*/
for (short x = (screenMinX/imgBlockWidth); x <= (screenMaxX/imgBlockWidth); x++) {
for (short y = (screenMinY/imgBlockWidth); y <= (screenMaxY/imgBlockWidth); y++) {
if((x == imgBlockNum) || (y == imgBlockNum)) { continue;}
circleImgBlockArray[(y*imgBlockNum + x) *(cuConstRendererParams.numCircles) + index] = 1;
circleImgBlockId[(y*imgBlockNum + x) *(cuConstRendererParams.numCircles) + index] = index+1;
//printf("Index = %d %d %d\n", x, y, index);
//printf("HERE!!!!\n");
}
}
}
__global__ void print_kernel(int length, int* input) {
printf("HERE\n");
for(int i=0; i< length; i++) {
printf("input[%d] = %d\n", i, input[i]);
}
}
__global__ void compare_array(int length, int* array1, int* array2) {
for(int i=0; i< length; i++) {
if(array1[i] != array2[i]) {
printf("Arrays don't match. Expected = %d, Got = %d\n", array1[i], array2[i]);
}
}
}
__global__ void getRefCircleArray(int* refCircleImgArray) {
for (int index = 0; index < cuConstRendererParams.numCircles; index++) {
int index3 = 3 * index;
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
float rad = cuConstRendererParams.radius[index];
// BlockDim = 256 x1, gridDim = 4x4
int circleInBox = circleInBoxConservative(p.x, p.y, rad,
static_cast<float>(1.f/gridDim.x)*blockIdx.x, static_cast<float>(1.f/gridDim.x)*(blockIdx.x+1),
static_cast<float>(1.f/gridDim.y)*(blockIdx.y+1), static_cast<float>(1.f/gridDim.y)*(blockIdx.y));
//printf("ID: %d\n" , index + (blockIdx.x + blockIdx.y*gridDim.x)*cuConstRendererParams.numCircles);
refCircleImgArray[index + (blockIdx.x + blockIdx.y*gridDim.x)*cuConstRendererParams.numCircles] = circleInBox;
}
}
//predicate functor
template <typename T>
struct is_not_zero : public thrust::unary_function<T,bool>
{
__host__ __device__
bool operator()(T x)
{
return (x != 0);
}
};
// convert a linear index to a row index
template <typename T>
struct linear_index_to_row_index : public thrust::unary_function<T,T>
{
T C; // number of columns
__host__ __device__
linear_index_to_row_index(T C) : C(C) {}
__host__ __device__
T operator()(T i)
{
return i / C;
}
};
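// linear_index_to_row_index(numCircles) maps a flat position in the
// numImgBlocks x numCircles matrix to its image-block row, letting the
// reduce_by_key call in render() sum each block's overlap flags, while
// is_not_zero is the copy_if predicate that compacts the nonzero circle IDs.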
__global__ void kernelRenderSmallCircles() {
float invWidth = cuConstRendererParams.invWidth;
float invHeight = cuConstRendererParams.invHeight;
int imageWidth = cuConstRendererParams.imageWidth;
int imageHeight = cuConstRendererParams.imageHeight;
int x = blockIdx.x*(imageWidth/gridDim.x) + threadIdx.x;
int y = blockIdx.y*(imageHeight/gridDim.y) + threadIdx.y;
float red_pixel = cuConstRendererParams.imageData[(4 * (y * imageWidth + x))];
float green_pixel = cuConstRendererParams.imageData[(4 * (y * imageWidth + x)) + 1];
float blue_pixel = cuConstRendererParams.imageData[(4 * (y * imageWidth + x)) + 2];
float alpha_pixel = cuConstRendererParams.imageData[(4 * (y * imageWidth + x)) + 3];
//TODO: is it converted to registers?? ---> I think its best if I pass by reference??
//float4 imgPtr = *(float4*)(&cuConstRendererParams.imageData[4 * (y * imageWidth + x)]);
__syncthreads(); //to make sure this shared memory is visible to everyone --> can remove this as the syncthreads below will take care of it
for (int index = 0; index < cuConstRendererParams.numCircles; index++) {
int index3 = 3 * index;
float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
float rad = cuConstRendererParams.radius[index];
// BlockDim = 256 x1, gridDim = 4x4
//__shared__ int circleInBox;
//if(threadIdx.x + threadIdx.y == 0) {
int circleInBox = circleInBoxConservative(p.x, p.y, rad,
static_cast<float>(1.f/gridDim.x)*blockIdx.x, static_cast<float>(1.f/gridDim.x)*(blockIdx.x+1),
static_cast<float>(1.f/gridDim.y)*(blockIdx.y+1), static_cast<float>(1.f/gridDim.y)*(blockIdx.y));
//}
//__syncthreads(); //TODO: is this even needed? --- but why?
if(circleInBox == 0) { continue; }
float2 pixelCenterNorm = make_float2(invWidth * (static_cast<float>(x) + 0.5f),
invHeight * (static_cast<float>(y) + 0.5f));
shadePixel(index, pixelCenterNorm, p, red_pixel, green_pixel, blue_pixel, alpha_pixel);
}
__syncthreads();
cuConstRendererParams.imageData[4 * (y * imageWidth + x)] = red_pixel;
cuConstRendererParams.imageData[4 * (y * imageWidth + x) + 1] = green_pixel;
cuConstRendererParams.imageData[4 * (y * imageWidth + x) + 2 ] = blue_pixel;
cuConstRendererParams.imageData[4 * (y * imageWidth + x) + 3 ] = alpha_pixel;
}
void
CudaRenderer::render() {
// 256 threads per block is a healthy number
//dim3 blockDim(256, 1);
//dim3 gridDim((numCircles + blockDim.x - 1) / blockDim.x);
// compute the bounding box of the circle. The bound is in integer
// screen coordinates, so it's clamped to the edges of the screen.
short imageWidth = image->width;
/*
short imageHeight = image->height;
dim3 blockDim(16, 16);
// dim3 gridDim((numPixels + blockDim.x - 1) / blockDim.x);
//int numPixels = imageWidth * imageHeight;
int temp1 = (imageWidth + blockDim.x - 1) / blockDim.x;
int temp2 = (imageHeight + blockDim.y - 1) / blockDim.y;
dim3 gridDim(temp1,temp2); //dividing it into block -- each block working on a portion of image
//NumPixels per block
//int numPixelsPerBlock = blockDim.x * blockDim.y * 4;
*/
int* circleImgBlockArray = NULL;
int* circleImgBlockId = NULL;
//printf("NumCircles = %d\n",numCircles);
if (numCircles < 5) {
// compute the bounding box of the circle. The bound is in integer
// screen coordinates, so it's clamped to the edges of the screen.
short imageWidth = image->width;
short imageHeight = image->height;
dim3 blockDim(16, 16);
// dim3 gridDim((numPixels + blockDim.x - 1) / blockDim.x);
//int numPixels = imageWidth * imageHeight;
int temp1 = (imageWidth + blockDim.x - 1) / blockDim.x;
int temp2 = (imageHeight + blockDim.y - 1) / blockDim.y;
dim3 gridDim(temp1,temp2); //dividing it into block -- each block working on a portion of image
//NumPixels per block
//int numPixelsPerBlock = blockDim.x * blockDim.y * 4;
//TODO: why does perf tank with more kernels --- what's the trade off?
kernelRenderSmallCircles<<<gridDim, blockDim>>>();
gpuErrchk(cudaDeviceSynchronize());
} else {
int imgBlockNum =16;
//if(numCircles < 5) {
// imgBlockNum = 1;
//} else if(numCircles < 100) {
// imgBlockNum = 2;
//} else if(numCircles < 1000) {
// imgBlockNum = 4;
//} else if (numCircles < 10000) {
// imgBlockNum = 8;
//} else {
// imgBlockNum = 32;
//}
int numImgBlocks = imgBlockNum * imgBlockNum;
int numElements = numCircles * imgBlockNum * imgBlockNum;
cudaMalloc(&circleImgBlockArray, sizeof(int) * numElements);
cudaMalloc(&circleImgBlockId, sizeof(int) * numElements);
// zero both arrays: cudaMalloc does not initialize memory, and the kernel below
// only writes the slots whose circle/block pair actually overlaps
cudaMemset(circleImgBlockArray, 0, sizeof(int) * numElements);
cudaMemset(circleImgBlockId, 0, sizeof(int) * numElements);
//gpuErrchk(cudaDeviceSynchronize());
dim3 blockDim(256, 1);
dim3 gridDim((numCircles + blockDim.x - 1) / blockDim.x);
make_circleImgBlockArray<<<gridDim, blockDim>>>(circleImgBlockArray,circleImgBlockId,imageWidth/imgBlockNum, imgBlockNum);
/*Convert the 2D circle block array into 1 D array by removing 0 values */
thrust::device_ptr<int> thrust_arr = thrust::device_pointer_cast(circleImgBlockArray);
thrust::device_ptr<int> thrust_circleid = thrust::device_pointer_cast(circleImgBlockId);
//thrust::device_vector<int> prefix_sum(num_img_blocks);
//Gets the number circles per each block
//This is used to generate the starting address of each array
//int *reduced = NULL;
//reduced = (int*) malloc(sizeof(int)*(numImgBlocks+1));
// allocate storage for row sums and indices
thrust::device_vector<int> row_sums(numImgBlocks+1);
thrust::device_vector<int> row_indices(numImgBlocks);
// compute row sums by summing values with equal row indices
thrust::reduce_by_key
(thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(numCircles)),
thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(numCircles)) + numElements,
thrust_arr,
row_indices.begin(),
row_sums.begin(),
thrust::equal_to<int>(),
thrust::plus<int>());
thrust::fill(thrust::device, row_sums.end() - 1, row_sums.end(), 0);
//thrust::copy(row_sums.begin(), row_sums.end(), std::ostream_iterator<int>(std::cout, " "));
//TODO CHECK: Are these matching? If yes, what is more performant?
//for(int i=0; i< numImgBlocks; i++) {
// printf("Row_sums[%d] = %d\n", i, row_sums[i]);
//}
/*
for(int i=0; i<numImgBlocks; i++) {
reduced[i] = thrust::reduce(thrust_arr+numCircles*i, thrust_arr + numCircles*(i+1));
// printf("Reduced[%d] %d\n", i, reduced[i]);
}
reduced[numImgBlocks] = 0;
//Run exclusive scan to get starting address of each array
thrust::device_vector<int> circleStartAddr(numImgBlocks+1);
thrust::device_ptr<int> reduced_gpu = thrust::device_malloc<int>(numImgBlocks+1);
cudaMemcpy(reduced_gpu.get(), reduced, (numImgBlocks + 1) * sizeof(int),
cudaMemcpyHostToDevice);
thrust::exclusive_scan(reduced_gpu, reduced_gpu+numImgBlocks+1, circleStartAddr.begin());
//thrust::copy(circleStartAddr.begin(), circleStartAddr.end(), std::ostream_iterator<float>(std::cout, " "));
*/
thrust::device_vector<int> circleStartAddr(numImgBlocks+1);
thrust::exclusive_scan(row_sums.begin(), row_sums.end(), circleStartAddr.begin());
//thrust::copy(circleStartAddr.begin(), circleStartAddr.end(), std::ostream_iterator<float>(std::cout, " "));
//TODO CHECK: get a sum of reduced and compare that with num pairs to confirm its all correct!
//TODO CHECK: which one is more performant? -- choose that
int num_pairs = thrust::reduce(thrust_arr, thrust_arr + numElements);
//int num_pairs = circleStartAddr[numImgBlocks];
//printf("SUM = %d\n", num_pairs);
cudaFree(circleImgBlockArray);
//thrust::device_vector<int> prefix_sum(num_img_blocks);
//kernelRenderCircles<<<gridDim, blockDim>>>();
// gpuErrchk(cudaDeviceSynchronize());
//allocate the right size of array
//This array will be traversed by each block -- by using starting address from circleStartAddr
thrust::device_vector<int> circleImgBlockList(num_pairs);
thrust::copy_if(thrust_circleid, thrust_circleid + numElements, circleImgBlockList.begin(), is_not_zero<int>());
cudaFree(circleImgBlockId);
//thrust::copy(circleImgBlockList.begin(), circleImgBlockList.end(), std::ostream_iterator<float>(std::cout, " "));
//TODO: can use cuda streams to parallelize these I think...
/*
int* refCircleImgArray = NULL;
cudaMalloc(&refCircleImgArray, sizeof(int) * numCircles * imgBlockNum * imgBlockNum);
dim3 gridDim2(imgBlockNum, imgBlockNum);
getRefCircleArray<<<gridDim2, 1>>>(refCircleImgArray);
gpuErrchk(cudaDeviceSynchronize());
compare_array<<<1,1>>>(numCircles * imgBlockNum * imgBlockNum, refCircleImgArray, circleImgBlockArray);
*/
//print_kernel<<<1,1>>>(numCircles * imgBlockNum * imgBlockNum, circleImgBlockArray);
//TODO: Need to free all these data structures that I am creating!!
dim3 gridDim3(imgBlockNum, imgBlockNum);
dim3 blockDim3(16, 16);
//TODO: convert to shared memory version??
//kernelRenderCircles<<<gridDim, blockDim,numPixelsPerBlock*sizeof(float)>>>(imageWidth, imageHeight);
int *deviceStartAddr = NULL;
deviceStartAddr = thrust::raw_pointer_cast(circleStartAddr.data());
int *deviceImgBlockList = NULL;
deviceImgBlockList = thrust::raw_pointer_cast(circleImgBlockList.data());
//int numPixelsPerBlock = blockDim.x * blockDim.y * 4;
kernelRenderCircles<<<gridDim3, blockDim3>>>(deviceImgBlockList, deviceStartAddr);
//kernelRenderCircles<<<gridDim3, blockDim3>>>(circleImgBlockList, deviceStartAddr);
gpuErrchk(cudaDeviceSynchronize());
}
}
|
87190ac652a0d610184f4e4e52c0a584d8832e4a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/kernels/sparse/softmax_grad_kernel.h"
#include <thrust/binary_search.h>
#include <thrust/device_ptr.h>
#include <thrust/equal.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/visit_type.h"
#include "paddle/phi/kernels/empty_kernel.h"
#include "paddle/phi/kernels/full_kernel.h"
#include "paddle/phi/kernels/funcs/math_cuda_utils.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/sparse/softmax.cu.h"
#include "paddle/phi/kernels/softmax_grad_kernel.h"
#include "paddle/phi/kernels/sparse/empty_kernel.h"
namespace phi {
namespace sparse {
template <typename T, typename IntT = int>
__global__ void SoftmaxGradGpuKernel(const IntT* out_crows,
const T* out_values,
const T* dout_values,
T* dx_values,
int row_number,
int total_row_number) {
// dx = (dout - sum(dout * out)) * out
int row = blockIdx.x * blockDim.y + threadIdx.y;
int non_zero_idx = threadIdx.x;
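// One warp handles one CSR row: each lane strides over the row's nonzeros and the
// partial dot products are combined below with a warp-level reduction.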
if (row >= total_row_number) return;
int cur_batch = row / row_number;
int crow_idx = cur_batch * (row_number + 1) + (row % row_number);
int cur_batch_offset = 0;
for (int i = 1; i < cur_batch + 1; ++i) {
cur_batch_offset += out_crows[i * (row_number + 1) - 1];
}
int row_first = cur_batch_offset + static_cast<int>(out_crows[crow_idx]);
int row_nnz = static_cast<int>(out_crows[crow_idx + 1] - out_crows[crow_idx]);
if (row_nnz == 0) return;
int kIteration = (row_nnz + warpSize - 1) / warpSize;
T mul_result = 0;
for (int i = 0; i < kIteration; ++i) {
int idx = non_zero_idx + i * warpSize;
if (idx >= row_nnz) break;
mul_result += out_values[row_first + idx] * dout_values[row_first + idx];
}
T sum = phi::funcs::WarpReduceSum<T>(mul_result, 0xFFFFFFFF);
for (int i = 0; i < kIteration; ++i) {
int idx = non_zero_idx + i * warpSize;
if (idx >= row_nnz) break;
dx_values[row_first + idx] =
(dout_values[row_first + idx] - sum) * out_values[row_first + idx];
}
}
template <typename T, typename Context>
void SoftmaxCsrGradKernel(const Context& dev_ctx,
const SparseCsrTensor& out,
const SparseCsrTensor& dout,
int axis,
SparseCsrTensor* dx) {
PADDLE_ENFORCE_EQ(axis,
-1,
phi::errors::Unimplemented(
"SparseCsrTensor only support axis=-1 for softmax, "
"which is faster when reading data by row (axis=-1)"));
EmptyLikeCsrKernel<T, Context>(dev_ctx, dout, dx);
auto out_dim = out.dims();
auto out_rank = out_dim.size();
int total_row_number = 1;
int row_number = 1;
for (int i = 0; i < out_rank - 1; ++i) {
total_row_number *= out_dim[i];
if (i == out_rank - 2) {
row_number = out_dim[i];
}
}
dim3 grid((total_row_number + 3) / 4);
dim3 block(32, 4);
PD_VISIT_BASE_INTEGRAL_TYPES(
out.crows().dtype(), "SoftmaxCsrGradKernel", ([&] {
hipLaunchKernelGGL(( SoftmaxGradGpuKernel<T, data_t>), dim3(grid), dim3(block), 0, dev_ctx.stream(),
out.crows().data<data_t>(),
out.values().data<T>(),
dout.values().data<T>(),
dx->mutable_values()->data<T>(),
row_number,
total_row_number);
}));
}
template <typename T, typename IntT>
__global__ void SoftmaxCooGradGPURawKernel(IntT* sorted_pool_indices,
IntT size,
IntT* pool_sizes,
IntT* pool_offsets,
IntT nvalues,
IntT grad_nnz,
IntT* grad_offsets,
IntT* out_offsets,
IntT* lower_bound_values,
T* values,
T* out_values,
T* grad_values,
int total_rows) {
int row = blockIdx.x * blockDim.y + threadIdx.y;
if (row >= total_rows) return;
int tid = threadIdx.x;
int index = row / nvalues;
int nval = row % nvalues;
IntT offset = pool_offsets[index];
IntT* pool_indices = sorted_pool_indices + offset;
IntT pool_indices_size = pool_sizes[index];
int kIteration = (pool_indices_size + warpSize - 1) / warpSize;
T mul_result = 0;
for (int k = 0; k < kIteration; ++k) {
int idx = tid + k * warpSize;
if (idx >= pool_indices_size) break;
auto i = pool_indices[idx];
auto cur_out_value = out_values + i * nvalues;
auto j = lower_bound_values[i];
if (j < grad_nnz && (out_offsets[i] == grad_offsets[j])) {
auto cur_grad_value = grad_values + j * nvalues;
mul_result += (*(cur_out_value + nval)) * (*(cur_grad_value + nval));
}
}
T sum = phi::funcs::WarpReduceSum<T>(mul_result, 0xFFFFFFFF);
for (int k = 0; k < kIteration; ++k) {
int idx = tid + k * warpSize;
if (idx >= pool_indices_size) break;
auto i = pool_indices[idx];
auto j = lower_bound_values[i];
auto cur_out_value = out_values + i * nvalues;
auto cur_value = values + i * nvalues;
auto cur_grad_value = grad_values + j * nvalues;
if (j < grad_nnz && (out_offsets[i] == grad_offsets[j])) {
cur_value[nval] =
(*(cur_out_value + nval)) * (*(cur_grad_value + nval) - sum);
} else {
cur_value[nval] = -(*(cur_out_value + nval)) * sum;
}
}
}
template <typename T, typename IntT, typename Context>
void SoftmaxCooGradGPUKernel(const Context& dev_ctx,
const SparseCooTensor& out,
const SparseCooTensor& dout,
int axis,
SparseCooTensor* dx) {
using thrust_ptr = thrust::device_ptr<IntT>;
auto out_indices = out.indices();
auto out_values = out.values();
auto out_values_ptr = out_values.data<T>();
const auto output_indices_dims = out.indices().dims();
const auto out_dims = out.dims();
auto sparse_dim = out.sparse_dim();
auto sizes = phi::vectorize<IntT>(out_dims);
auto grad_indices = dout.indices();
auto grad_values = dout.values();
auto grad_values_ptr = grad_values.data<T>();
auto out_nnz = out.nnz();
auto grad_nnz = dout.nnz();
auto place = dev_ctx.GetPlace();
auto stream = dev_ctx.stream();
*(dx->mutable_indices()) = out_indices;
DenseTensor* values = dx->mutable_values();
values->Resize(out_dims);
values->set_meta(out_values.meta());
dev_ctx.template Alloc<T>(values);
phi::funcs::SetConstant<GPUContext, T> set_zero;
set_zero(dev_ctx, values, static_cast<T>(0.0f));
DenseTensor out_offsets = phi::funcs::sparse::GetOffsets<IntT, Context>(
dev_ctx, out_indices, sizes, static_cast<IntT>(-1));
auto out_offsets_ptr = out_offsets.data<IntT>();
DenseTensor grad_offsets = phi::funcs::sparse::GetOffsets<IntT, Context>(
dev_ctx, grad_indices, sizes, static_cast<IntT>(-1));
auto grad_offsets_ptr = grad_offsets.data<IntT>();
#ifdef PADDLE_WITH_HIP
const auto& policy = thrust::hip::par.on(dev_ctx.stream());
bool is_same_offset = thrust::equal(thrust::hip::par.on(dev_ctx.stream()),
#else
const auto& policy = thrust::hip::par.on(dev_ctx.stream());
bool is_same_offset = thrust::equal(thrust::hip::par.on(dev_ctx.stream()),
#endif
out_offsets_ptr,
out_offsets_ptr + out_offsets.numel(),
grad_offsets_ptr);
int dim = axis < 0 ? out_dims.size() + axis : axis;
if (dim >= sparse_dim) {
PADDLE_ENFORCE_EQ(
is_same_offset,
true,
phi::errors::Unimplemented(
"SparseCooTensor only support same offsets for softmax."));
SoftmaxGradKernel<T, Context>(
dev_ctx, out_values, grad_values, dim - sparse_dim + 1, values);
return;
}
auto nnz = out.nnz();
IntT nvalues = std::accumulate(sizes.begin() + sparse_dim,
sizes.end(),
static_cast<IntT>(1),
std::multiplies<>());
DenseTensor values_2(*values);
values_2.Resize(phi::make_ddim({nnz, nvalues}));
DenseTensor sorted_indices;
DenseTensor pool_offsets;
DenseTensor pool_sizes;
std::tie(sorted_indices, pool_offsets, pool_sizes, std::ignore) =
phi::funcs::sparse::ComputePoolMax<T, IntT, Context, false>(
dev_ctx, out_indices, values_2, sizes, nvalues, dim);
DenseTensor bound =
phi::Empty<IntT>(dev_ctx, {static_cast<IntT>(out_offsets.dims()[0])});
IntT* bound_ptr = bound.data<IntT>();
thrust::lower_bound(policy,
thrust_ptr(grad_offsets_ptr),
thrust_ptr(grad_offsets_ptr + grad_offsets.dims()[0]),
thrust_ptr(out_offsets_ptr),
thrust_ptr(out_offsets_ptr) + out_offsets.dims()[0],
thrust_ptr(bound.data<IntT>()));
auto pool_size = pool_offsets.dims()[0];
int total_rows = pool_size * nvalues;
dim3 grid((total_rows + 15) / 16);
dim3 block(32, 16);
hipLaunchKernelGGL(( SoftmaxCooGradGPURawKernel<T, IntT>)
, dim3(grid), dim3(block), 0, stream, sorted_indices.data<IntT>(),
pool_size,
pool_sizes.data<IntT>(),
pool_offsets.data<IntT>(),
nvalues,
grad_nnz,
grad_offsets.data<IntT>(),
out_offsets.data<IntT>(),
bound_ptr,
values_2.data<T>(),
out_values.data<T>(),
grad_values.data<T>(),
total_rows);
}
template <typename T, typename Context>
void SoftmaxCooGradKernel(const Context& dev_ctx,
const SparseCooTensor& out,
const SparseCooTensor& dout,
int axis,
SparseCooTensor* dx) {
PD_VISIT_BASE_INTEGRAL_TYPES(
out.indices().dtype(), "SoftmaxCooGradGPUKernel", ([&] {
SoftmaxCooGradGPUKernel<T, data_t, Context>(
dev_ctx, out, dout, axis, dx);
}));
}
} // namespace sparse
} // namespace phi
PD_REGISTER_KERNEL(softmax_csr_grad,
GPU,
ALL_LAYOUT,
phi::sparse::SoftmaxCsrGradKernel,
float,
double) {
kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_CSR);
}
PD_REGISTER_KERNEL(softmax_coo_grad,
GPU,
ALL_LAYOUT,
phi::sparse::SoftmaxCooGradKernel,
float,
double) {
kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
}
| 87190ac652a0d610184f4e4e52c0a584d8832e4a.cu | /* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/kernels/sparse/softmax_grad_kernel.h"
#include <thrust/binary_search.h>
#include <thrust/device_ptr.h>
#include <thrust/equal.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/visit_type.h"
#include "paddle/phi/kernels/empty_kernel.h"
#include "paddle/phi/kernels/full_kernel.h"
#include "paddle/phi/kernels/funcs/math_cuda_utils.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/sparse/softmax.cu.h"
#include "paddle/phi/kernels/softmax_grad_kernel.h"
#include "paddle/phi/kernels/sparse/empty_kernel.h"
namespace phi {
namespace sparse {
template <typename T, typename IntT = int>
__global__ void SoftmaxGradGpuKernel(const IntT* out_crows,
const T* out_values,
const T* dout_values,
T* dx_values,
int row_number,
int total_row_number) {
// dx = (dout - sum(dout * out)) * out
int row = blockIdx.x * blockDim.y + threadIdx.y;
int non_zero_idx = threadIdx.x;
if (row >= total_row_number) return;
int cur_batch = row / row_number;
int crow_idx = cur_batch * (row_number + 1) + (row % row_number);
int cur_batch_offset = 0;
for (int i = 1; i < cur_batch + 1; ++i) {
cur_batch_offset += out_crows[i * (row_number + 1) - 1];
}
int row_first = cur_batch_offset + static_cast<int>(out_crows[crow_idx]);
int row_nnz = static_cast<int>(out_crows[crow_idx + 1] - out_crows[crow_idx]);
if (row_nnz == 0) return;
int kIteration = (row_nnz + warpSize - 1) / warpSize;
T mul_result = 0;
for (int i = 0; i < kIteration; ++i) {
int idx = non_zero_idx + i * warpSize;
if (idx >= row_nnz) break;
mul_result += out_values[row_first + idx] * dout_values[row_first + idx];
}
T sum = phi::funcs::WarpReduceSum<T>(mul_result, 0xFFFFFFFF);
for (int i = 0; i < kIteration; ++i) {
int idx = non_zero_idx + i * warpSize;
if (idx >= row_nnz) break;
dx_values[row_first + idx] =
(dout_values[row_first + idx] - sum) * out_values[row_first + idx];
}
}
template <typename T, typename Context>
void SoftmaxCsrGradKernel(const Context& dev_ctx,
const SparseCsrTensor& out,
const SparseCsrTensor& dout,
int axis,
SparseCsrTensor* dx) {
PADDLE_ENFORCE_EQ(axis,
-1,
phi::errors::Unimplemented(
"SparseCsrTensor only support axis=-1 for softmax, "
"which is faster when reading data by row (axis=-1)"));
EmptyLikeCsrKernel<T, Context>(dev_ctx, dout, dx);
auto out_dim = out.dims();
auto out_rank = out_dim.size();
int total_row_number = 1;
int row_number = 1;
for (int i = 0; i < out_rank - 1; ++i) {
total_row_number *= out_dim[i];
if (i == out_rank - 2) {
row_number = out_dim[i];
}
}
dim3 grid((total_row_number + 3) / 4);
dim3 block(32, 4);
PD_VISIT_BASE_INTEGRAL_TYPES(
out.crows().dtype(), "SoftmaxCsrGradKernel", ([&] {
SoftmaxGradGpuKernel<T, data_t><<<grid, block, 0, dev_ctx.stream()>>>(
out.crows().data<data_t>(),
out.values().data<T>(),
dout.values().data<T>(),
dx->mutable_values()->data<T>(),
row_number,
total_row_number);
}));
}
template <typename T, typename IntT>
__global__ void SoftmaxCooGradGPURawKernel(IntT* sorted_pool_indices,
IntT size,
IntT* pool_sizes,
IntT* pool_offsets,
IntT nvalues,
IntT grad_nnz,
IntT* grad_offsets,
IntT* out_offsets,
IntT* lower_bound_values,
T* values,
T* out_values,
T* grad_values,
int total_rows) {
int row = blockIdx.x * blockDim.y + threadIdx.y;
if (row >= total_rows) return;
int tid = threadIdx.x;
int index = row / nvalues;
int nval = row % nvalues;
IntT offset = pool_offsets[index];
IntT* pool_indices = sorted_pool_indices + offset;
IntT pool_indices_size = pool_sizes[index];
int kIteration = (pool_indices_size + warpSize - 1) / warpSize;
T mul_result = 0;
for (int k = 0; k < kIteration; ++k) {
int idx = tid + k * warpSize;
if (idx >= pool_indices_size) break;
auto i = pool_indices[idx];
auto cur_out_value = out_values + i * nvalues;
auto j = lower_bound_values[i];
if (j < grad_nnz && (out_offsets[i] == grad_offsets[j])) {
auto cur_grad_value = grad_values + j * nvalues;
mul_result += (*(cur_out_value + nval)) * (*(cur_grad_value + nval));
}
}
T sum = phi::funcs::WarpReduceSum<T>(mul_result, 0xFFFFFFFF);
for (int k = 0; k < kIteration; ++k) {
int idx = tid + k * warpSize;
if (idx >= pool_indices_size) break;
auto i = pool_indices[idx];
auto j = lower_bound_values[i];
auto cur_out_value = out_values + i * nvalues;
auto cur_value = values + i * nvalues;
auto cur_grad_value = grad_values + j * nvalues;
if (j < grad_nnz && (out_offsets[i] == grad_offsets[j])) {
cur_value[nval] =
(*(cur_out_value + nval)) * (*(cur_grad_value + nval) - sum);
} else {
cur_value[nval] = -(*(cur_out_value + nval)) * sum;
}
}
}
template <typename T, typename IntT, typename Context>
void SoftmaxCooGradGPUKernel(const Context& dev_ctx,
const SparseCooTensor& out,
const SparseCooTensor& dout,
int axis,
SparseCooTensor* dx) {
using thrust_ptr = thrust::device_ptr<IntT>;
auto out_indices = out.indices();
auto out_values = out.values();
auto out_values_ptr = out_values.data<T>();
const auto output_indices_dims = out.indices().dims();
const auto out_dims = out.dims();
auto sparse_dim = out.sparse_dim();
auto sizes = phi::vectorize<IntT>(out_dims);
auto grad_indices = dout.indices();
auto grad_values = dout.values();
auto grad_values_ptr = grad_values.data<T>();
auto out_nnz = out.nnz();
auto grad_nnz = dout.nnz();
auto place = dev_ctx.GetPlace();
auto stream = dev_ctx.stream();
*(dx->mutable_indices()) = out_indices;
DenseTensor* values = dx->mutable_values();
values->Resize(out_dims);
values->set_meta(out_values.meta());
dev_ctx.template Alloc<T>(values);
phi::funcs::SetConstant<GPUContext, T> set_zero;
set_zero(dev_ctx, values, static_cast<T>(0.0f));
DenseTensor out_offsets = phi::funcs::sparse::GetOffsets<IntT, Context>(
dev_ctx, out_indices, sizes, static_cast<IntT>(-1));
auto out_offsets_ptr = out_offsets.data<IntT>();
DenseTensor grad_offsets = phi::funcs::sparse::GetOffsets<IntT, Context>(
dev_ctx, grad_indices, sizes, static_cast<IntT>(-1));
auto grad_offsets_ptr = grad_offsets.data<IntT>();
#ifdef PADDLE_WITH_HIP
const auto& policy = thrust::hip::par.on(dev_ctx.stream());
bool is_same_offset = thrust::equal(thrust::hip::par.on(dev_ctx.stream()),
#else
const auto& policy = thrust::cuda::par.on(dev_ctx.stream());
bool is_same_offset = thrust::equal(thrust::cuda::par.on(dev_ctx.stream()),
#endif
out_offsets_ptr,
out_offsets_ptr + out_offsets.numel(),
grad_offsets_ptr);
int dim = axis < 0 ? out_dims.size() + axis : axis;
if (dim >= sparse_dim) {
PADDLE_ENFORCE_EQ(
is_same_offset,
true,
phi::errors::Unimplemented(
"SparseCooTensor only support same offsets for softmax."));
SoftmaxGradKernel<T, Context>(
dev_ctx, out_values, grad_values, dim - sparse_dim + 1, values);
return;
}
auto nnz = out.nnz();
IntT nvalues = std::accumulate(sizes.begin() + sparse_dim,
sizes.end(),
static_cast<IntT>(1),
std::multiplies<>());
DenseTensor values_2(*values);
values_2.Resize(phi::make_ddim({nnz, nvalues}));
DenseTensor sorted_indices;
DenseTensor pool_offsets;
DenseTensor pool_sizes;
std::tie(sorted_indices, pool_offsets, pool_sizes, std::ignore) =
phi::funcs::sparse::ComputePoolMax<T, IntT, Context, false>(
dev_ctx, out_indices, values_2, sizes, nvalues, dim);
DenseTensor bound =
phi::Empty<IntT>(dev_ctx, {static_cast<IntT>(out_offsets.dims()[0])});
IntT* bound_ptr = bound.data<IntT>();
thrust::lower_bound(policy,
thrust_ptr(grad_offsets_ptr),
thrust_ptr(grad_offsets_ptr + grad_offsets.dims()[0]),
thrust_ptr(out_offsets_ptr),
thrust_ptr(out_offsets_ptr) + out_offsets.dims()[0],
thrust_ptr(bound.data<IntT>()));
auto pool_size = pool_offsets.dims()[0];
int total_rows = pool_size * nvalues;
dim3 grid((total_rows + 15) / 16);
dim3 block(32, 16);
SoftmaxCooGradGPURawKernel<T, IntT>
<<<grid, block, 0, stream>>>(sorted_indices.data<IntT>(),
pool_size,
pool_sizes.data<IntT>(),
pool_offsets.data<IntT>(),
nvalues,
grad_nnz,
grad_offsets.data<IntT>(),
out_offsets.data<IntT>(),
bound_ptr,
values_2.data<T>(),
out_values.data<T>(),
grad_values.data<T>(),
total_rows);
}
template <typename T, typename Context>
void SoftmaxCooGradKernel(const Context& dev_ctx,
const SparseCooTensor& out,
const SparseCooTensor& dout,
int axis,
SparseCooTensor* dx) {
PD_VISIT_BASE_INTEGRAL_TYPES(
out.indices().dtype(), "SoftmaxCooGradGPUKernel", ([&] {
SoftmaxCooGradGPUKernel<T, data_t, Context>(
dev_ctx, out, dout, axis, dx);
}));
}
} // namespace sparse
} // namespace phi
PD_REGISTER_KERNEL(softmax_csr_grad,
GPU,
ALL_LAYOUT,
phi::sparse::SoftmaxCsrGradKernel,
float,
double) {
kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_CSR);
}
PD_REGISTER_KERNEL(softmax_coo_grad,
GPU,
ALL_LAYOUT,
phi::sparse::SoftmaxCooGradKernel,
float,
double) {
kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
}
|
19998af7e82d3fdd2d8a9c95ac4e43f2b18e0e16.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <opencv2/opencv.hpp>
#include <cmath>
#include <hip/hip_runtime.h>
using namespace std;
using namespace cv;
int* _filter(int* data,int channels, int rows,int cols,float *kernel,int kerneldim, int kernelNormalize, int outputNormalizationMode);
uchar * filter(uchar * data,int channels, int rows,int cols,float *kernel,int kerneldim, int kernelNormalize, int outputNormalizationMode);
__device__ int getGlobalIdx_3D_3D();
__device__ int getblockthreadIdx();
int* uchartoint(uchar* data, int size){
int* buff = (int*)malloc(sizeof(int)*size);
for(int i=0;i<size;i++){
*(buff+i)=(int)*(data+i);
}
return buff;
}
uchar* inttouchar(int* data, int size){
uchar* buff = (uchar*)malloc(sizeof(uchar)*size);
for(int i=0;i<size;i++){
*(buff+i)=(unsigned char)*(data+i);
}
return buff;
}
__global__
void convolution(int* data,int* buff,float* kernel,int* outputvars,int rows,int cols,int channels,int kerneldim){
int idx = getGlobalIdx_3D_3D();
int kernelmid;
extern __shared__ float sharedKernel[];
float *kernelCenter;
if (getblockthreadIdx()<kerneldim*kerneldim){
*(sharedKernel+getblockthreadIdx())=*(kernel+getblockthreadIdx());
}
__syncthreads();
/*
if (getblockthreadIdx()<kerneldim*kerneldim){
printf("%d %f\n",getblockthreadIdx(),*(sharedKernel+getblockthreadIdx()));
}
__syncthreads();
*/
kernelmid = kerneldim%2==1?kerneldim/2:(kerneldim-1)/2;
kernelCenter=sharedKernel+(((kerneldim+1)*kernelmid));
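// kernelCenter points at the middle of the shared kernel; the negated (r,c) offsets
// used below read the kernel flipped, i.e. a true convolution rather than correlation.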
int row = idx / (cols*channels);
int col = (idx%(cols*channels))/channels;
float value=0;
int pixel=0;
float kernelVal=0;
int pixelmin=INT_MAX,pixelmax=INT_MIN;
int kernelmidHalf=(kerneldim/2);
if (col>0 && row>0 && row<rows-1 && col<cols-1){
data = data+idx;
//r<=(kernelmidHalf) no funciona, no sé porque, pero cuda y yo tenemos un problema.
for(int r = (-1*kernelmidHalf); r<(kernelmidHalf+1);r++){
for(int c = -1*kernelmidHalf; c<(kernelmidHalf+1);c++){
pixel=*(data+(r*cols*channels)+(c*channels));
kernelVal=*(kernelCenter+(r*-1*kerneldim)+(c*-1));
value+=kernelVal*pixel;
if (pixel<pixelmin){
pixelmin=pixel;
}
if (pixel>pixelmax){
pixelmax=pixel;
}
}
}
*(buff+idx)=value;
atomicMin(outputvars,value);
atomicMax(outputvars+1,value);
atomicMin(outputvars+2,pixelmin);
atomicMax(outputvars+3,pixelmax);
}
//__syncthreads();
/*if (col>0 && row>0 && row<rows-1 && col<cols-1 && getblockthreadIdx()==0){
printf("%d %d %d %d\n",*(outputvars),*(outputvars+1),*(outputvars+2),*(outputvars+3));
}*/
}
__global__
void normalize(int* data,int channels, int rows, int cols,int min, int max, int newMin, int newMax, int mode){
int pixval=0;
int i = getGlobalIdx_3D_3D();
int row = i / (cols*channels);
int col = (i%(cols*channels))/channels;
if (row>0 && col>0 && row<rows-1 && col<cols-1){
pixval=*(data+i);
if (mode==1){
*(data+i)=(pixval-min)*((newMax-newMin*1.0)/(max-min))+newMin;
}else{
*(data+i)=pixval>newMax?newMax:pixval<newMin?newMin:pixval;
}
}
// __syncthreads();
}
uchar * edge1(uchar* data,int channels, int rows,int cols){
float kernel[3][3]={
{1,0,-1},
{0,0,0},
{-1,0,1}
};
return filter(data,channels,rows,cols,(float*)kernel,3,0,0);
}
uchar * edge2(uchar* data,int channels, int rows,int cols){
float kernel[3][3]={
{0,1,0},
{1,-4,1},
{0,1,0}
};
return filter(data,channels,rows,cols,(float*)kernel,3,0,0);
}
uchar * edge3(uchar* data,int channels, int rows,int cols){
float kernel[3][3]={
{-1,-1,-1},
{-1,8,-1},
{-1,-1,-1},
};
return filter(data,channels,rows,cols,(float*)kernel,3,0,0);
}
uchar * sharpen(uchar* data,int channels, int rows,int cols){
float kernel[3][3]={
{0,-1,0},
{-1,5,-1},
{0,-1,0},
};
return filter(data,channels,rows,cols,(float*)kernel,3,0,0);
}
uchar * boxblur(uchar* data,int channels, int rows,int cols){
float kernel[3][3]={
{1,1,1},
{1,1,1},
{1,1,1},
};
return filter(data,channels,rows,cols,(float*)kernel,3,1,0);
}
uchar * gaussianblur(uchar* data,int channels, int rows,int cols){
float kernel[3][3]={
{1,2,1},
{2,4,2},
{1,2,1},
};
return filter(data,channels,rows,cols,(float*)kernel,3,1,0);
}
int * _sobelx(int* data,int channels, int rows,int cols, int mode){
float kernel[3][3]={
{1,0,-1},
{2,0,-2},
{1,0,-1},
};
float * d_kernel;
hipMalloc(&d_kernel,sizeof(float)*3*3);
hipMemcpy(d_kernel,(float*)kernel,sizeof(float)*3*3,hipMemcpyHostToDevice);
int* res = _filter(data,channels,rows,cols,d_kernel,3,0,mode);
hipFree(d_kernel);
return res;
}
uchar * sobelx(uchar* data,int channels, int rows,int cols){
int* datai = uchartoint(data,channels*rows*cols);
int * d_data;
hipMalloc(&d_data,sizeof(int)*channels*rows*cols);
hipMemcpy(d_data,datai,sizeof(int)*channels*rows*cols,hipMemcpyHostToDevice);
int* d_output = _sobelx(d_data,channels,rows,cols,0);
int* output = (int*)malloc(sizeof(int)*rows*cols*channels);
hipMemcpy(output,d_output,sizeof(int)*rows*cols*channels, hipMemcpyDeviceToHost);
uchar* out = inttouchar(output,rows*cols*channels);
hipFree(d_data);
hipFree(d_output);
free(output);
return out;
}
int * _sobely(int* data,int channels, int rows,int cols, int mode){
float kernel[3][3]={
{1,2,1},
{0,0,0},
{-1,-2,-1},
};
float * d_kernel;
hipMalloc(&d_kernel,sizeof(float)*3*3);
hipMemcpy(d_kernel,(float*)kernel,sizeof(float)*3*3,hipMemcpyHostToDevice);
int* res = _filter(data,channels,rows,cols,d_kernel,3,0,mode);
hipFree(d_kernel);
return res;
}
int * _sobelx10(int* data,int channels, int rows,int cols, int mode){
float kernel[3][3]={
{3,0,-3},
{10,0,-10},
{3,0,-3},
};
float * d_kernel;
hipMalloc(&d_kernel,sizeof(float)*3*3);
hipMemcpy(d_kernel,(float*)kernel,sizeof(float)*3*3,hipMemcpyHostToDevice);
int* res = _filter(data,channels,rows,cols,d_kernel,3,0,mode);
hipFree(d_kernel);
return res;
}
int * _sobely10(int* data,int channels, int rows,int cols, int mode){
float kernel[3][3]={
{3,10,3},
{0,0,0},
{-3,-10,-3},
};
float * d_kernel;
hipMalloc(&d_kernel,sizeof(float)*3*3);
hipMemcpy(d_kernel,(float*)kernel,sizeof(float)*3*3,hipMemcpyHostToDevice);
int* res = _filter(data,channels,rows,cols,d_kernel,3,0,mode);
hipFree(d_kernel);
return res;
}
uchar * sobely(uchar* data,int channels, int rows,int cols){
int* datai = uchartoint(data,channels*rows*cols);
int * d_data;
hipMalloc(&d_data,sizeof(int)*channels*rows*cols);
hipMemcpy(d_data,datai,sizeof(int)*channels*rows*cols,hipMemcpyHostToDevice);
int* d_output = _sobely(d_data,channels,rows,cols,0);
int* output = (int*)malloc(sizeof(int)*rows*cols*channels);
hipMemcpy(output,d_output,sizeof(int)*rows*cols*channels, hipMemcpyDeviceToHost);
uchar* out = inttouchar(output,rows*cols*channels);
hipFree(d_data);
hipFree(d_output);
free(output);
return out;
}
__global__
void sobelKernel(int *a, int*b,int* output,int* outputvars,int n){
int i = getGlobalIdx_3D_3D();
if (i>=n){return;}
int val=sqrtf((*(a+i))*(*(a+i))+(*(b+i))*(*(b+i)));
*(output+i)=val;
atomicMin(outputvars,val);
atomicMax(outputvars+1,val);
}
uchar * sobel(uchar* data,int channels, int rows,int cols){
int* datai = uchartoint(data,channels*rows*cols);
int * d_data,*minmaxs;
int * d_output,*output;
hipMalloc(&minmaxs,sizeof(int)*2);
hipMalloc(&d_data,sizeof(int)*channels*rows*cols);
hipMemcpy(d_data,datai,sizeof(int)*channels*rows*cols,hipMemcpyHostToDevice);
hipMalloc(&d_output,sizeof(int)*rows*cols*channels);
output = (int*)malloc(sizeof(int)*rows*cols*channels);
int * filterx = _sobelx(d_data,channels,rows,cols,-1);
int * filtery = _sobely(d_data,channels,rows,cols,-1);
hipMemset(minmaxs,INT_MAX,1);
hipMemset(minmaxs+1,INT_MIN,1);
hipLaunchKernelGGL(( sobelKernel), dim3(ceil((rows*cols*channels)/256.0)),dim3(256), 0, 0, filterx,filtery,d_output,minmaxs,rows*cols*channels);
int* tmpMinMax = (int*)malloc(sizeof(int)*2);
hipMemcpy(tmpMinMax,minmaxs,sizeof(int)*2, hipMemcpyDeviceToHost);
hipLaunchKernelGGL(( normalize), dim3(ceil((rows*cols*channels)/256.0)),dim3(256), 0, 0, d_output,channels,rows,cols,*(tmpMinMax),*(tmpMinMax+1),0,255,1);
hipMemcpy(output,d_output,sizeof(int)*rows*cols*channels, hipMemcpyDeviceToHost);
//printf("%d %d %d %d\n",*(tmpMinMax),*(tmpMinMax+1),0,255);
uchar* out = inttouchar(output,rows*cols*channels);
hipFree(minmaxs);
hipFree(d_data);
hipFree(d_output);
free(datai);
free(output);
hipFree(filterx);
hipFree(filtery);
free(tmpMinMax);
return out;
}
uchar * sobel10(uchar* data,int channels, int rows,int cols){
int* datai = uchartoint(data,channels*rows*cols);
int * d_data,*minmaxs;
int * d_output,*output;
hipMalloc(&minmaxs,sizeof(int)*2);
hipMalloc(&d_data,sizeof(int)*channels*rows*cols);
hipMemcpy(d_data,datai,sizeof(int)*channels*rows*cols,hipMemcpyHostToDevice);
hipMalloc(&d_output,sizeof(int)*rows*cols*channels);
output = (int*)malloc(sizeof(int)*rows*cols*channels);
int * filterx = _sobelx10(d_data,channels,rows,cols,-1);
int * filtery = _sobely10(d_data,channels,rows,cols,-1);
hipMemset(minmaxs,INT_MAX,1);
hipMemset(minmaxs+1,INT_MIN,1);
hipLaunchKernelGGL(( sobelKernel), dim3(ceil((rows*cols*channels)/256.0)),dim3(256), 0, 0, filterx,filtery,d_output,minmaxs,rows*cols*channels);
int* tmpMinMax = (int*)malloc(sizeof(int)*2);
hipMemcpy(tmpMinMax,minmaxs,sizeof(int)*2, hipMemcpyDeviceToHost);
hipLaunchKernelGGL(( normalize), dim3(ceil((rows*cols*channels)/256.0)),dim3(256), 0, 0, d_output,channels,rows,cols,*(tmpMinMax),*(tmpMinMax+1),0,255,1);
hipMemcpy(output,d_output,sizeof(int)*rows*cols*channels, hipMemcpyDeviceToHost);
//printf("%d %d %d %d\n",*(tmpMinMax),*(tmpMinMax+1),0,255);
uchar* out = inttouchar(output,rows*cols*channels);
hipFree(minmaxs);
hipFree(d_data);
hipFree(d_output);
free(datai);
free(output);
hipFree(filterx);
hipFree(filtery);
free(tmpMinMax);
return out;
}
uchar * sobely10(uchar* data,int channels, int rows,int cols){
int* datai = uchartoint(data,channels*rows*cols);
int * d_data;
hipMalloc(&d_data,sizeof(int)*channels*rows*cols);
hipMemcpy(d_data,datai,sizeof(int)*channels*rows*cols,hipMemcpyHostToDevice);
int* d_output = _sobely10(d_data,channels,rows,cols,0);
int* output = (int*)malloc(sizeof(int)*rows*cols*channels);
hipMemcpy(output,d_output,sizeof(int)*rows*cols*channels, hipMemcpyDeviceToHost);
uchar* out = inttouchar(output,rows*cols*channels);
hipFree(d_data);
hipFree(d_output);
free(output);
return out;
}
uchar * sobelx10(uchar* data,int channels, int rows,int cols){
int* datai = uchartoint(data,channels*rows*cols);
int * d_data;
hipMalloc(&d_data,sizeof(int)*channels*rows*cols);
hipMemcpy(d_data,datai,sizeof(int)*channels*rows*cols,hipMemcpyHostToDevice);
int* d_output = _sobelx10(d_data,channels,rows,cols,0);
int* output = (int*)malloc(sizeof(int)*rows*cols*channels);
hipMemcpy(output,d_output,sizeof(int)*rows*cols*channels, hipMemcpyDeviceToHost);
uchar* out = inttouchar(output,rows*cols*channels);
hipFree(d_data);
hipFree(d_output);
free(output);
return out;
}
__global__
void kernelNormAdd(float* kernel,float* output, int kernelNormalize){
int i = blockIdx.x*blockDim.x + threadIdx.x;
float kernelVal=*((float*)kernel+i);
atomicAdd(output+(kernelVal>=0),kernelVal);
__syncthreads();
if (kernelNormalize==1){
*(kernel+i)=kernelVal/(*output+*(output+1));
}else{
*(kernel+i)=kernelVal/(*(output+(kernelVal>=0)));
}
__syncthreads();
}
int* _filter(int* data,int channels, int rows,int cols,float *kernel,int kerneldim, int kernelNormalize, int outputNormalizationMode){
int* buff,*minmaxs;
hipMalloc(&buff,sizeof(int)*channels*rows*cols);
hipMalloc(&minmaxs,sizeof(int)*4);
hipMemset(buff,0,sizeof(int)*channels*rows*cols);
if (kernelNormalize){
float* sumKernel;
hipMalloc(&sumKernel,sizeof(float)*2);
hipMemset(sumKernel,0,sizeof(float)*2);
hipLaunchKernelGGL(( kernelNormAdd), dim3(1),dim3(9), 0, 0, kernel,sumKernel,kernelNormalize);
hipFree(sumKernel);
}
int N = rows*cols*channels;
int ssize = (sizeof(float)*kerneldim*kerneldim);
hipMemset(minmaxs,INT_MAX,1);
hipMemset(minmaxs+1,INT_MIN,1);
hipMemset(minmaxs+2,INT_MAX,1);
hipMemset(minmaxs+3,INT_MIN,1);
hipLaunchKernelGGL(( convolution), dim3(ceil(N/256.0)),dim3(256),ssize, 0, data,buff,kernel,minmaxs,rows,cols,channels,kerneldim);
hipError_t err=hipGetLastError();
if ( hipSuccess != err ){
printf( "Error!\n" );
printf("GPUassert: %s\n", hipGetErrorString(err));
}
if (outputNormalizationMode>=0){
int* tmpMinMax = (int*)malloc(sizeof(int)*4);
hipMemcpy(tmpMinMax,minmaxs,sizeof(int)*4, hipMemcpyDeviceToHost);
//printf("%d %d %d %d\n",*(tmpMinMax),*(tmpMinMax+1),*(tmpMinMax+2),*(tmpMinMax+3));
hipLaunchKernelGGL(( normalize), dim3(ceil(N/256.0)),dim3(256), 0, 0, buff,channels,rows,cols,*(tmpMinMax),*(tmpMinMax+1),*(tmpMinMax+2),*(tmpMinMax+3),outputNormalizationMode);
free(tmpMinMax);
}
hipFree(minmaxs);
return buff;
}
uchar* filter(uchar* data,int channels, int rows,int cols,float *kernel,int kerneldim, int kernelNormalize, int outputNormalizationMode){
int* datai = uchartoint(data,channels*rows*cols);
int * d_data;
hipMalloc(&d_data,sizeof(int)*channels*rows*cols);
hipMemcpy(d_data,datai,sizeof(int)*channels*rows*cols,hipMemcpyHostToDevice);
float * d_kernel;
hipMalloc(&d_kernel,sizeof(float)*3*3);
hipMemcpy(d_kernel,kernel,sizeof(float)*3*3,hipMemcpyHostToDevice);
int* d_output = _filter(d_data,channels,rows,cols,d_kernel,kerneldim,kernelNormalize,outputNormalizationMode);
int* output = (int*)malloc(sizeof(int)*rows*cols*channels);
hipMemcpy(output,d_output,sizeof(int)*rows*cols*channels, hipMemcpyDeviceToHost);
uchar* out = inttouchar(output,rows*cols*channels);
hipFree(d_data);
hipFree(d_output);
free(datai);
free(output);
return out;
}
int main(int argc, char** argv){
if (argc<3){
cout<<"./nombre imagen filtro"<<endl;
return 0;
}
char* nfiltro=*(argv+2);
uchar* (*filtro)(uchar*,int,int,int)=0;
if(strcmp(nfiltro,"sobel")==0) filtro=sobel;
if(strcmp(nfiltro,"sobelx")==0) filtro=sobelx;
if(strcmp(nfiltro,"sobely")==0) filtro=sobely;
if(strcmp(nfiltro,"sobel10")==0) filtro=sobel10;
if(strcmp(nfiltro,"sobelx10")==0) filtro=sobelx10;
if(strcmp(nfiltro,"sobely10")==0) filtro=sobely10;
if(strcmp(nfiltro,"edge1")==0) filtro=edge1;
if(strcmp(nfiltro,"edge2")==0) filtro=edge2;
if(strcmp(nfiltro,"edge3")==0) filtro=edge3;
if(strcmp(nfiltro,"boxblur")==0) filtro=boxblur;
if(strcmp(nfiltro,"gaussianblur")==0) filtro=gaussianblur;
if(strcmp(nfiltro,"sharpen")==0) filtro=sharpen;
if (filtro==0){
cout<<"metodo erroneo"<<endl;
return 1;
}
Mat image;
image = imread(*(argv+1), CV_LOAD_IMAGE_COLOR);
Mat m1;
if(! image.data ) // Check for invalid input
{
cout << "Could not open or find the image" << std::endl ;
return -1;
}
m1 = Mat (image);
m1.data=filtro(image.data,3,image.rows,image.cols);
namedWindow( "original", WINDOW_AUTOSIZE );
imshow( "original", image );
namedWindow( "filter", WINDOW_AUTOSIZE );
imshow( "filter", m1 );
waitKey(); // Wait for a keystroke in the window
return 0;
}
__device__ int getGlobalIdx_3D_3D()
{
int blockId = blockIdx.x
+ blockIdx.y * gridDim.x
+ gridDim.x * gridDim.y * blockIdx.z;
int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z)
+ (threadIdx.z * (blockDim.x * blockDim.y))
+ (threadIdx.y * blockDim.x)
+ threadIdx.x;
return threadId;
}
__device__ int getblockthreadIdx(){
return (threadIdx.z * (blockDim.x * blockDim.y))
+ (threadIdx.y * blockDim.x)
+ threadIdx.x;
}
| 19998af7e82d3fdd2d8a9c95ac4e43f2b18e0e16.cu | #include <stdio.h>
#include <stdlib.h>
#include <opencv2/opencv.hpp>
#include <cmath>
#include <cuda.h>
using namespace std;
using namespace cv;
int* _filter(int* data,int channels, int rows,int cols,float *kernel,int kerneldim, int kernelNormalize, int outputNormalizationMode);
uchar * filter(uchar * data,int channels, int rows,int cols,float *kernel,int kerneldim, int kernelNormalize, int outputNormalizationMode);
__device__ int getGlobalIdx_3D_3D();
__device__ int getblockthreadIdx();
int* uchartoint(uchar* data, int size){
int* buff = (int*)malloc(sizeof(int)*size);
for(int i=0;i<size;i++){
*(buff+i)=(int)*(data+i);
}
return buff;
}
uchar* inttouchar(int* data, int size){
uchar* buff = (uchar*)malloc(sizeof(uchar)*size);
for(int i=0;i<size;i++){
*(buff+i)=(unsigned char)*(data+i);
}
return buff;
}
__global__
void convolution(int* data,int* buff,float* kernel,int* outputvars,int rows,int cols,int channels,int kerneldim){
int idx = getGlobalIdx_3D_3D();
int kernelmid;
extern __shared__ float sharedKernel[];
float *kernelCenter;
if (getblockthreadIdx()<kerneldim*kerneldim){
*(sharedKernel+getblockthreadIdx())=*(kernel+getblockthreadIdx());
}
__syncthreads();
/*
if (getblockthreadIdx()<kerneldim*kerneldim){
printf("%d %f\n",getblockthreadIdx(),*(sharedKernel+getblockthreadIdx()));
}
__syncthreads();
*/
kernelmid = kerneldim%2==1?kerneldim/2:(kerneldim-1)/2;
kernelCenter=sharedKernel+(((kerneldim+1)*kernelmid));
int row = idx / (cols*channels);
int col = (idx%(cols*channels))/channels;
float value=0;
int pixel=0;
float kernelVal=0;
int pixelmin=INT_MAX,pixelmax=INT_MIN;
int kernelmidHalf=(kerneldim/2);
if (col>0 && row>0 && row<rows-1 && col<cols-1){
data = data+idx;
//r<=(kernelmidHalf) no funciona, no sé porque, pero cuda y yo tenemos un problema.
for(int r = (-1*kernelmidHalf); r<(kernelmidHalf+1);r++){
for(int c = -1*kernelmidHalf; c<(kernelmidHalf+1);c++){
pixel=*(data+(r*cols*channels)+(c*channels));
kernelVal=*(kernelCenter+(r*-1*kerneldim)+(c*-1));
value+=kernelVal*pixel;
if (pixel<pixelmin){
pixelmin=pixel;
}
if (pixel>pixelmax){
pixelmax=pixel;
}
}
}
*(buff+idx)=value;
atomicMin(outputvars,value);
atomicMax(outputvars+1,value);
atomicMin(outputvars+2,pixelmin);
atomicMax(outputvars+3,pixelmax);
}
//__syncthreads();
/*if (col>0 && row>0 && row<rows-1 && col<cols-1 && getblockthreadIdx()==0){
printf("%d %d %d %d\n",*(outputvars),*(outputvars+1),*(outputvars+2),*(outputvars+3));
}*/
}
__global__
void normalize(int* data,int channels, int rows, int cols,int min, int max, int newMin, int newMax, int mode){
int pixval=0;
int i = getGlobalIdx_3D_3D();
int row = i / (cols*channels);
int col = (i%(cols*channels))/channels;
if (row>0 && col>0 && row<rows-1 && col<cols-1){
pixval=*(data+i);
if (mode==1){
*(data+i)=(pixval-min)*((newMax-newMin*1.0)/(max-min))+newMin;
}else{
*(data+i)=pixval>newMax?newMax:pixval<newMin?newMin:pixval;
}
}
// __syncthreads();
}
uchar * edge1(uchar* data,int channels, int rows,int cols){
float kernel[3][3]={
{1,0,-1},
{0,0,0},
{-1,0,1}
};
return filter(data,channels,rows,cols,(float*)kernel,3,0,0);
}
uchar * edge2(uchar* data,int channels, int rows,int cols){
float kernel[3][3]={
{0,1,0},
{1,-4,1},
{0,1,0}
};
return filter(data,channels,rows,cols,(float*)kernel,3,0,0);
}
uchar * edge3(uchar* data,int channels, int rows,int cols){
float kernel[3][3]={
{-1,-1,-1},
{-1,8,-1},
{-1,-1,-1},
};
return filter(data,channels,rows,cols,(float*)kernel,3,0,0);
}
uchar * sharpen(uchar* data,int channels, int rows,int cols){
float kernel[3][3]={
{0,-1,0},
{-1,5,-1},
{0,-1,0},
};
return filter(data,channels,rows,cols,(float*)kernel,3,0,0);
}
uchar * boxblur(uchar* data,int channels, int rows,int cols){
float kernel[3][3]={
{1,1,1},
{1,1,1},
{1,1,1},
};
return filter(data,channels,rows,cols,(float*)kernel,3,1,0);
}
uchar * gaussianblur(uchar* data,int channels, int rows,int cols){
float kernel[3][3]={
{1,2,1},
{2,4,2},
{1,2,1},
};
return filter(data,channels,rows,cols,(float*)kernel,3,1,0);
}
int * _sobelx(int* data,int channels, int rows,int cols, int mode){
float kernel[3][3]={
{1,0,-1},
{2,0,-2},
{1,0,-1},
};
float * d_kernel;
cudaMalloc(&d_kernel,sizeof(float)*3*3);
cudaMemcpy(d_kernel,(float*)kernel,sizeof(float)*3*3,cudaMemcpyHostToDevice);
int* res = _filter(data,channels,rows,cols,d_kernel,3,0,mode);
cudaFree(d_kernel);
return res;
}
uchar * sobelx(uchar* data,int channels, int rows,int cols){
int* datai = uchartoint(data,channels*rows*cols);
int * d_data;
cudaMalloc(&d_data,sizeof(int)*channels*rows*cols);
cudaMemcpy(d_data,datai,sizeof(int)*channels*rows*cols,cudaMemcpyHostToDevice);
int* d_output = _sobelx(d_data,channels,rows,cols,0);
int* output = (int*)malloc(sizeof(int)*rows*cols*channels);
cudaMemcpy(output,d_output,sizeof(int)*rows*cols*channels, cudaMemcpyDeviceToHost);
uchar* out = inttouchar(output,rows*cols*channels);
cudaFree(d_data);
cudaFree(d_output);
free(output);
return out;
}
int * _sobely(int* data,int channels, int rows,int cols, int mode){
float kernel[3][3]={
{1,2,1},
{0,0,0},
{-1,-2,-1},
};
float * d_kernel;
cudaMalloc(&d_kernel,sizeof(float)*3*3);
cudaMemcpy(d_kernel,(float*)kernel,sizeof(float)*3*3,cudaMemcpyHostToDevice);
int* res = _filter(data,channels,rows,cols,d_kernel,3,0,mode);
cudaFree(d_kernel);
return res;
}
int * _sobelx10(int* data,int channels, int rows,int cols, int mode){
float kernel[3][3]={
{3,0,-3},
{10,0,-10},
{3,0,-3},
};
float * d_kernel;
cudaMalloc(&d_kernel,sizeof(float)*3*3);
cudaMemcpy(d_kernel,(float*)kernel,sizeof(float)*3*3,cudaMemcpyHostToDevice);
int* res = _filter(data,channels,rows,cols,d_kernel,3,0,mode);
cudaFree(d_kernel);
return res;
}
int * _sobely10(int* data,int channels, int rows,int cols, int mode){
float kernel[3][3]={
{3,10,3},
{0,0,0},
{-3,-10,-3},
};
float * d_kernel;
cudaMalloc(&d_kernel,sizeof(float)*3*3);
cudaMemcpy(d_kernel,(float*)kernel,sizeof(float)*3*3,cudaMemcpyHostToDevice);
int* res = _filter(data,channels,rows,cols,d_kernel,3,0,mode);
cudaFree(d_kernel);
return res;
}
uchar * sobely(uchar* data,int channels, int rows,int cols){
int* datai = uchartoint(data,channels*rows*cols);
int * d_data;
cudaMalloc(&d_data,sizeof(int)*channels*rows*cols);
cudaMemcpy(d_data,datai,sizeof(int)*channels*rows*cols,cudaMemcpyHostToDevice);
int* d_output = _sobely(d_data,channels,rows,cols,0);
int* output = (int*)malloc(sizeof(int)*rows*cols*channels);
cudaMemcpy(output,d_output,sizeof(int)*rows*cols*channels, cudaMemcpyDeviceToHost);
uchar* out = inttouchar(output,rows*cols*channels);
cudaFree(d_data);
cudaFree(d_output);
free(output);
return out;
}
__global__
void sobelKernel(int *a, int*b,int* output,int* outputvars,int n){
int i = getGlobalIdx_3D_3D();
if (i>=n){return;}
int val=sqrtf((*(a+i))*(*(a+i))+(*(b+i))*(*(b+i)));
*(output+i)=val;
atomicMin(outputvars,val);
atomicMax(outputvars+1,val);
}
uchar * sobel(uchar* data,int channels, int rows,int cols){
int* datai = uchartoint(data,channels*rows*cols);
int * d_data,*minmaxs;
int * d_output,*output;
cudaMalloc(&minmaxs,sizeof(int)*2);
cudaMalloc(&d_data,sizeof(int)*channels*rows*cols);
cudaMemcpy(d_data,datai,sizeof(int)*channels*rows*cols,cudaMemcpyHostToDevice);
cudaMalloc(&d_output,sizeof(int)*rows*cols*channels);
output = (int*)malloc(sizeof(int)*rows*cols*channels);
int * filterx = _sobelx(d_data,channels,rows,cols,-1);
int * filtery = _sobely(d_data,channels,rows,cols,-1);
cudaMemset(minmaxs,INT_MAX,1);
cudaMemset(minmaxs+1,INT_MIN,1);
sobelKernel<<<ceil((rows*cols*channels)/256.0),256>>>(filterx,filtery,d_output,minmaxs,rows*cols*channels);
int* tmpMinMax = (int*)malloc(sizeof(int)*2);
cudaMemcpy(tmpMinMax,minmaxs,sizeof(int)*2, cudaMemcpyDeviceToHost);
normalize<<<ceil((rows*cols*channels)/256.0),256>>>(d_output,channels,rows,cols,*(tmpMinMax),*(tmpMinMax+1),0,255,1);
cudaMemcpy(output,d_output,sizeof(int)*rows*cols*channels, cudaMemcpyDeviceToHost);
//printf("%d %d %d %d\n",*(tmpMinMax),*(tmpMinMax+1),0,255);
uchar* out = inttouchar(output,rows*cols*channels);
cudaFree(minmaxs);
cudaFree(d_data);
cudaFree(d_output);
free(datai);
free(output);
cudaFree(filterx);
cudaFree(filtery);
free(tmpMinMax);
return out;
}
uchar * sobel10(uchar* data,int channels, int rows,int cols){
int* datai = uchartoint(data,channels*rows*cols);
int * d_data,*minmaxs;
int * d_output,*output;
cudaMalloc(&minmaxs,sizeof(int)*2);
cudaMalloc(&d_data,sizeof(int)*channels*rows*cols);
cudaMemcpy(d_data,datai,sizeof(int)*channels*rows*cols,cudaMemcpyHostToDevice);
cudaMalloc(&d_output,sizeof(int)*rows*cols*channels);
output = (int*)malloc(sizeof(int)*rows*cols*channels);
int * filterx = _sobelx10(d_data,channels,rows,cols,-1);
int * filtery = _sobely10(d_data,channels,rows,cols,-1);
cudaMemset(minmaxs,INT_MAX,1);
cudaMemset(minmaxs+1,INT_MIN,1);
sobelKernel<<<ceil((rows*cols*channels)/256.0),256>>>(filterx,filtery,d_output,minmaxs,rows*cols*channels);
int* tmpMinMax = (int*)malloc(sizeof(int)*2);
cudaMemcpy(tmpMinMax,minmaxs,sizeof(int)*2, cudaMemcpyDeviceToHost);
normalize<<<ceil((rows*cols*channels)/256.0),256>>>(d_output,channels,rows,cols,*(tmpMinMax),*(tmpMinMax+1),0,255,1);
cudaMemcpy(output,d_output,sizeof(int)*rows*cols*channels, cudaMemcpyDeviceToHost);
//printf("%d %d %d %d\n",*(tmpMinMax),*(tmpMinMax+1),0,255);
uchar* out = inttouchar(output,rows*cols*channels);
cudaFree(minmaxs);
cudaFree(d_data);
cudaFree(d_output);
free(datai);
free(output);
cudaFree(filterx);
cudaFree(filtery);
free(tmpMinMax);
return out;
}
uchar * sobely10(uchar* data,int channels, int rows,int cols){
int* datai = uchartoint(data,channels*rows*cols);
int * d_data;
cudaMalloc(&d_data,sizeof(int)*channels*rows*cols);
cudaMemcpy(d_data,datai,sizeof(int)*channels*rows*cols,cudaMemcpyHostToDevice);
int* d_output = _sobely10(d_data,channels,rows,cols,0);
int* output = (int*)malloc(sizeof(int)*rows*cols*channels);
cudaMemcpy(output,d_output,sizeof(int)*rows*cols*channels, cudaMemcpyDeviceToHost);
uchar* out = inttouchar(output,rows*cols*channels);
cudaFree(d_data);
cudaFree(d_output);
free(output);
return out;
}
uchar * sobelx10(uchar* data,int channels, int rows,int cols){
int* datai = uchartoint(data,channels*rows*cols);
int * d_data;
cudaMalloc(&d_data,sizeof(int)*channels*rows*cols);
cudaMemcpy(d_data,datai,sizeof(int)*channels*rows*cols,cudaMemcpyHostToDevice);
int* d_output = _sobelx10(d_data,channels,rows,cols,0);
int* output = (int*)malloc(sizeof(int)*rows*cols*channels);
cudaMemcpy(output,d_output,sizeof(int)*rows*cols*channels, cudaMemcpyDeviceToHost);
uchar* out = inttouchar(output,rows*cols*channels);
cudaFree(d_data);
cudaFree(d_output);
free(output);
return out;
}
__global__
void kernelNormAdd(float* kernel,float* output, int kernelNormalize){
int i = blockIdx.x*blockDim.x + threadIdx.x;
float kernelVal=*((float*)kernel+i);
atomicAdd(output+(kernelVal>=0),kernelVal);
__syncthreads();
if (kernelNormalize==1){
*(kernel+i)=kernelVal/(*output+*(output+1));
}else{
*(kernel+i)=kernelVal/(*(output+(kernelVal>=0)));
}
__syncthreads();
}
int* _filter(int* data,int channels, int rows,int cols,float *kernel,int kerneldim, int kernelNormalize, int outputNormalizationMode){
int* buff,*minmaxs;
cudaMalloc(&buff,sizeof(int)*channels*rows*cols);
cudaMalloc(&minmaxs,sizeof(int)*4);
cudaMemset(buff,0,sizeof(int)*channels*rows*cols);
if (kernelNormalize){
float* sumKernel;
cudaMalloc(&sumKernel,sizeof(float)*2);
cudaMemset(sumKernel,0,sizeof(float)*2);
kernelNormAdd<<<1,9>>>(kernel,sumKernel,kernelNormalize);
cudaFree(sumKernel);
}
int N = rows*cols*channels;
int ssize = (sizeof(float)*kerneldim*kerneldim);
cudaMemset(minmaxs,INT_MAX,1);
cudaMemset(minmaxs+1,INT_MIN,1);
cudaMemset(minmaxs+2,INT_MAX,1);
cudaMemset(minmaxs+3,INT_MIN,1);
convolution<<<ceil(N/256.0),256,ssize>>>(data,buff,kernel,minmaxs,rows,cols,channels,kerneldim);
cudaError_t err=cudaGetLastError();
if ( cudaSuccess != err ){
printf( "Error!\n" );
printf("GPUassert: %s\n", cudaGetErrorString(err));
}
if (outputNormalizationMode>=0){
int* tmpMinMax = (int*)malloc(sizeof(int)*4);
cudaMemcpy(tmpMinMax,minmaxs,sizeof(int)*4, cudaMemcpyDeviceToHost);
//printf("%d %d %d %d\n",*(tmpMinMax),*(tmpMinMax+1),*(tmpMinMax+2),*(tmpMinMax+3));
normalize<<<ceil(N/256.0),256>>>(buff,channels,rows,cols,*(tmpMinMax),*(tmpMinMax+1),*(tmpMinMax+2),*(tmpMinMax+3),outputNormalizationMode);
free(tmpMinMax);
}
cudaFree(minmaxs);
return buff;
}
uchar* filter(uchar* data,int channels, int rows,int cols,float *kernel,int kerneldim, int kernelNormalize, int outputNormalizationMode){
int* datai = uchartoint(data,channels*rows*cols);
int * d_data;
cudaMalloc(&d_data,sizeof(int)*channels*rows*cols);
cudaMemcpy(d_data,datai,sizeof(int)*channels*rows*cols,cudaMemcpyHostToDevice);
float * d_kernel;
cudaMalloc(&d_kernel,sizeof(float)*3*3);
cudaMemcpy(d_kernel,kernel,sizeof(float)*3*3,cudaMemcpyHostToDevice);
int* d_output = _filter(d_data,channels,rows,cols,d_kernel,kerneldim,kernelNormalize,outputNormalizationMode);
int* output = (int*)malloc(sizeof(int)*rows*cols*channels);
cudaMemcpy(output,d_output,sizeof(int)*rows*cols*channels, cudaMemcpyDeviceToHost);
uchar* out = inttouchar(output,rows*cols*channels);
cudaFree(d_data);
cudaFree(d_output);
free(datai);
free(output);
return out;
}
int main(int argc, char** argv){
if (argc<3){
cout<<"./nombre imagen filtro"<<endl;
return 0;
}
char* nfiltro=*(argv+2);
uchar* (*filtro)(uchar*,int,int,int)=0;
if(strcmp(nfiltro,"sobel")==0) filtro=sobel;
if(strcmp(nfiltro,"sobelx")==0) filtro=sobelx;
if(strcmp(nfiltro,"sobely")==0) filtro=sobely;
if(strcmp(nfiltro,"sobel10")==0) filtro=sobel10;
if(strcmp(nfiltro,"sobelx10")==0) filtro=sobelx10;
if(strcmp(nfiltro,"sobely10")==0) filtro=sobely10;
if(strcmp(nfiltro,"edge1")==0) filtro=edge1;
if(strcmp(nfiltro,"edge2")==0) filtro=edge2;
if(strcmp(nfiltro,"edge3")==0) filtro=edge3;
if(strcmp(nfiltro,"boxblur")==0) filtro=boxblur;
if(strcmp(nfiltro,"gaussianblur")==0) filtro=gaussianblur;
if(strcmp(nfiltro,"sharpen")==0) filtro=sharpen;
if (filtro==0){
cout<<"metodo erroneo"<<endl;
return 1;
}
Mat image;
image = imread(*(argv+1), CV_LOAD_IMAGE_COLOR);
Mat m1;
if(! image.data ) // Check for invalid input
{
cout << "Could not open or find the image" << std::endl ;
return -1;
}
m1 = Mat (image);
m1.data=filtro(image.data,3,image.rows,image.cols);
namedWindow( "original", WINDOW_AUTOSIZE );
imshow( "original", image );
namedWindow( "filter", WINDOW_AUTOSIZE );
imshow( "filter", m1 );
waitKey(); // Wait for a keystroke in the window
return 0;
}
__device__ int getGlobalIdx_3D_3D()
{
int blockId = blockIdx.x
+ blockIdx.y * gridDim.x
+ gridDim.x * gridDim.y * blockIdx.z;
int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z)
+ (threadIdx.z * (blockDim.x * blockDim.y))
+ (threadIdx.y * blockDim.x)
+ threadIdx.x;
return threadId;
}
__device__ int getblockthreadIdx(){
return (threadIdx.z * (blockDim.x * blockDim.y))
+ (threadIdx.y * blockDim.x)
+ threadIdx.x;
}
|
c268e47dd548c4d8398d05a42af542e6a608c024.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_func.h"
__global__ void init_kernel(int64_t n, float *x, float val)
{
int64_t index = blockIdx.x * blockDim.x + threadIdx.x;
int64_t stride = blockDim.x * gridDim.x;
for (int64_t i = index; i < n; i += stride) {
x[i] = val;
}
}
__global__ void reduce_kernel(int64_t n, float *x, float *y)
{
int64_t index = blockIdx.x * blockDim.x + threadIdx.x;
int64_t stride = blockDim.x * gridDim.x;
for (int64_t i = index; i < n; i += stride) {
y[i] = x[i] + y[i];
}
}
void gpu_mem_alloc(float **x, int64_t n)
{
hipMallocManaged(x, n*sizeof(float));
hipDeviceSynchronize();
}
void gpu_init(int64_t n, float *x, float val)
{
int blockSize = 256;
int numBlocks = (n + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( init_kernel), dim3(numBlocks), dim3(blockSize), 0, 0, n, x, val);
hipDeviceSynchronize();
}
void gpu_reduce(int64_t n, float *x, float *y)
{
int blockSize = 256;
int numBlocks = (n + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( reduce_kernel), dim3(numBlocks), dim3(blockSize), 0, 0, n, x, y);
hipDeviceSynchronize();
} | c268e47dd548c4d8398d05a42af542e6a608c024.cu | #include "cuda_func.h"
__global__ void init_kernel(int64_t n, float *x, float val)
{
int64_t index = blockIdx.x * blockDim.x + threadIdx.x;
int64_t stride = blockDim.x * gridDim.x;
for (int64_t i = index; i < n; i += stride) {
x[i] = val;
}
}
__global__ void reduce_kernel(int64_t n, float *x, float *y)
{
int64_t index = blockIdx.x * blockDim.x + threadIdx.x;
int64_t stride = blockDim.x * gridDim.x;
for (int64_t i = index; i < n; i += stride) {
y[i] = x[i] + y[i];
}
}
void gpu_mem_alloc(float **x, int64_t n)
{
cudaMallocManaged(x, n*sizeof(float));
cudaDeviceSynchronize();
}
void gpu_init(int64_t n, float *x, float val)
{
int blockSize = 256;
int numBlocks = (n + blockSize - 1) / blockSize;
init_kernel<<<numBlocks, blockSize>>>(n, x, val);
cudaDeviceSynchronize();
}
void gpu_reduce(int64_t n, float *x, float *y)
{
int blockSize = 256;
int numBlocks = (n + blockSize - 1) / blockSize;
reduce_kernel<<<numBlocks, blockSize>>>(n, x, y);
cudaDeviceSynchronize();
} |
809d2ce3a9a5e568fb9d0a4a18982f67e746d8e6.hip | // !!! This is a file automatically generated by hipify!!!
//xfail:BOOGIE_ERROR
//--blockDim=2 --gridDim=1 --no-inline
//Write by thread .+kernel\.cu:8:21:
#include <hip/hip_runtime.h>
__global__ void curand_test(hiprandState_t *state, float *A) {
A[threadIdx.x] = hiprand_uniform(state);
}
| 809d2ce3a9a5e568fb9d0a4a18982f67e746d8e6.cu | //xfail:BOOGIE_ERROR
//--blockDim=2 --gridDim=1 --no-inline
//Write by thread .+kernel\.cu:8:21:
#include <cuda.h>
__global__ void curand_test(curandState *state, float *A) {
A[threadIdx.x] = curand_uniform(state);
}
|
a0f8263fc01aeba7bdcb13a3297202a6c899f154.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "lusol.h"
__global__ void LEVEL_CSC_SYNC_0(int n, int *dp, int *jlev, int *tail) {
int gid = blockIdx.x * BLOCKDIM + threadIdx.x;
if (gid >= n) {
return;
}
if (dp[gid] == 0) {
int p = atomicAdd(tail, 1);
jlev[p] = gid + 1;
}
}
__global__ void LEVEL_CSC_SYNC_L(int n, int *ib, int *jb, int *db, int *dp,
int *jlev, int count, int head, int *tail) {
// global warp id
const int wid = (blockIdx.x * BLOCKDIM + threadIdx.x) / WARP;
// thread lane in each warp
const int lane = threadIdx.x & (WARP - 1);
if (wid >= count) {
return;
}
int i = jlev[head+wid] - 1;
int q1 = db[i] + 1;
int q2 = ib[i+1];
for (int j = q1 + lane; j < q2; j += WARP) {
int k = jb[j-1]-1;
int old = atomicSub(&dp[k], 1);
if (old == 1) {
int p = atomicAdd(tail, 1);
jlev[p] = k + 1;
}
}
}
__global__ void LEVEL_CSC_SYNC_U(int n, int *ib, int *jb, int *db, int *dp,
int *jlev, int count, int head, int *tail) {
// global warp id
const int wid = (blockIdx.x * BLOCKDIM + threadIdx.x) / WARP;
// thread lane in each warp
const int lane = threadIdx.x & (WARP - 1);
if (wid >= count) {
return;
}
int i = jlev[head+wid] - 1;
int q1 = ib[i];
int q2 = db[i];
for (int j = q1 + lane; j < q2; j += WARP) {
int k = jb[j-1]-1;
int old = atomicSub(&dp[k], 1);
if (old == 1) {
int p = atomicAdd(tail, 1);
jlev[p] = k + 1;
}
}
}
void makeLevelCSC_SYNC(int n, int *d_ib, int *d_jb, int *d_db,
int *d_dp,
int *d_jlevL, int *ilevL, int *nlevL,
int *d_jlevU, int *ilevU, int *nlevU) {
int gDim,lev;
int *d_tail, tail, head;
int *d_dpL = d_dp;
int *d_dpU = d_dp + n;
hipMalloc((void **)&d_tail, sizeof(int));
// L
lev = 0;
ilevL[lev++] = 1;
head = 0;
hipMemset(d_tail, 0, sizeof(int));
gDim = (n + BLOCKDIM - 1) / BLOCKDIM;
hipLaunchKernelGGL(( LEVEL_CSC_SYNC_0), dim3(gDim), dim3(BLOCKDIM), 0, 0, n, d_dpL, d_jlevL, d_tail);
hipMemcpy(&tail, d_tail, sizeof(int), hipMemcpyDeviceToHost);
while (tail < n) {
int count = tail - head;
int nthreads = count * WARP;
int gDim = (nthreads + BLOCKDIM - 1) / BLOCKDIM;
hipLaunchKernelGGL(( LEVEL_CSC_SYNC_L), dim3(gDim), dim3(BLOCKDIM), 0, 0, n, d_ib, d_jb, d_db, d_dpL, d_jlevL, count, head, d_tail);
head = tail;
hipMemcpy(&tail, d_tail, sizeof(int), hipMemcpyDeviceToHost);
ilevL[lev++] = head + 1;
}
ilevL[lev] = n + 1;
*nlevL = lev;
// U
lev = 0;
ilevU[lev++] = 1;
head = 0;
hipMemset(d_tail, 0, sizeof(int));
gDim = (n + BLOCKDIM - 1) / BLOCKDIM;
hipLaunchKernelGGL(( LEVEL_CSC_SYNC_0), dim3(gDim), dim3(BLOCKDIM), 0, 0, n, d_dpU, d_jlevU, d_tail);
hipMemcpy(&tail, d_tail, sizeof(int), hipMemcpyDeviceToHost);
while (tail < n) {
int count = tail - head;
int nthreads = count * WARP;
int gDim = (nthreads + BLOCKDIM - 1) / BLOCKDIM;
hipLaunchKernelGGL(( LEVEL_CSC_SYNC_U), dim3(gDim), dim3(BLOCKDIM), 0, 0, n, d_ib, d_jb, d_db, d_dpU, d_jlevU, count, head, d_tail);
head = tail;
hipMemcpy(&tail, d_tail, sizeof(int), hipMemcpyDeviceToHost);
ilevU[lev++] = head + 1;
}
ilevU[lev] = n + 1;
*nlevU = lev;
hipFree(d_tail);
}
| a0f8263fc01aeba7bdcb13a3297202a6c899f154.cu | #include "lusol.h"
__global__ void LEVEL_CSC_SYNC_0(int n, int *dp, int *jlev, int *tail) {
int gid = blockIdx.x * BLOCKDIM + threadIdx.x;
if (gid >= n) {
return;
}
if (dp[gid] == 0) {
int p = atomicAdd(tail, 1);
jlev[p] = gid + 1;
}
}
__global__ void LEVEL_CSC_SYNC_L(int n, int *ib, int *jb, int *db, int *dp,
int *jlev, int count, int head, int *tail) {
// global warp id
const int wid = (blockIdx.x * BLOCKDIM + threadIdx.x) / WARP;
// thread lane in each warp
const int lane = threadIdx.x & (WARP - 1);
if (wid >= count) {
return;
}
int i = jlev[head+wid] - 1;
int q1 = db[i] + 1;
int q2 = ib[i+1];
for (int j = q1 + lane; j < q2; j += WARP) {
int k = jb[j-1]-1;
int old = atomicSub(&dp[k], 1);
if (old == 1) {
int p = atomicAdd(tail, 1);
jlev[p] = k + 1;
}
}
}
__global__ void LEVEL_CSC_SYNC_U(int n, int *ib, int *jb, int *db, int *dp,
int *jlev, int count, int head, int *tail) {
// global warp id
const int wid = (blockIdx.x * BLOCKDIM + threadIdx.x) / WARP;
// thread lane in each warp
const int lane = threadIdx.x & (WARP - 1);
if (wid >= count) {
return;
}
int i = jlev[head+wid] - 1;
int q1 = ib[i];
int q2 = db[i];
for (int j = q1 + lane; j < q2; j += WARP) {
int k = jb[j-1]-1;
int old = atomicSub(&dp[k], 1);
if (old == 1) {
int p = atomicAdd(tail, 1);
jlev[p] = k + 1;
}
}
}
void makeLevelCSC_SYNC(int n, int *d_ib, int *d_jb, int *d_db,
int *d_dp,
int *d_jlevL, int *ilevL, int *nlevL,
int *d_jlevU, int *ilevU, int *nlevU) {
int gDim,lev;
int *d_tail, tail, head;
int *d_dpL = d_dp;
int *d_dpU = d_dp + n;
cudaMalloc((void **)&d_tail, sizeof(int));
// L
lev = 0;
ilevL[lev++] = 1;
head = 0;
cudaMemset(d_tail, 0, sizeof(int));
gDim = (n + BLOCKDIM - 1) / BLOCKDIM;
LEVEL_CSC_SYNC_0<<<gDim, BLOCKDIM>>>(n, d_dpL, d_jlevL, d_tail);
cudaMemcpy(&tail, d_tail, sizeof(int), cudaMemcpyDeviceToHost);
while (tail < n) {
int count = tail - head;
int nthreads = count * WARP;
int gDim = (nthreads + BLOCKDIM - 1) / BLOCKDIM;
LEVEL_CSC_SYNC_L<<<gDim, BLOCKDIM>>>(n, d_ib, d_jb, d_db, d_dpL, d_jlevL, count, head, d_tail);
head = tail;
cudaMemcpy(&tail, d_tail, sizeof(int), cudaMemcpyDeviceToHost);
ilevL[lev++] = head + 1;
}
ilevL[lev] = n + 1;
*nlevL = lev;
// U
lev = 0;
ilevU[lev++] = 1;
head = 0;
cudaMemset(d_tail, 0, sizeof(int));
gDim = (n + BLOCKDIM - 1) / BLOCKDIM;
LEVEL_CSC_SYNC_0<<<gDim, BLOCKDIM>>>(n, d_dpU, d_jlevU, d_tail);
cudaMemcpy(&tail, d_tail, sizeof(int), cudaMemcpyDeviceToHost);
while (tail < n) {
int count = tail - head;
int nthreads = count * WARP;
int gDim = (nthreads + BLOCKDIM - 1) / BLOCKDIM;
LEVEL_CSC_SYNC_U<<<gDim, BLOCKDIM>>>(n, d_ib, d_jb, d_db, d_dpU, d_jlevU, count, head, d_tail);
head = tail;
cudaMemcpy(&tail, d_tail, sizeof(int), cudaMemcpyDeviceToHost);
ilevU[lev++] = head + 1;
}
ilevU[lev] = n + 1;
*nlevU = lev;
cudaFree(d_tail);
}
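A host-side sketch of consuming the level sets built above, keeping the file's 1-based index convention and assuming jlev is a host copy of d_jlevL (or d_jlevU); the printf stands in for launching a per-level triangular-solve kernel.
#include <cstdio>
void print_levels(int nlev, const int *ilev, const int *jlev)
{
    for (int lev = 0; lev < nlev; lev++) {
        int begin = ilev[lev] - 1;      // 1-based bounds -> 0-based offsets
        int end   = ilev[lev + 1] - 1;
        // rows jlev[begin..end-1] are mutually independent and can be solved in parallel
        printf("level %d: %d rows (first row %d)\n", lev, end - begin, jlev[begin]);
    }
}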
|
7c30c9525e7574fff1a77f0244cedfc1bea6eab6.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) Facebook, Inc. and its affiliates.
#include <hip/hip_runtime_api.h>
namespace d2dev {
int get_cudart_version() {
// Not a ROCM platform: Either HIP is not used, or
// it is used, but platform is not ROCM (i.e. it is CUDA)
#if !defined(__HIP_PLATFORM_HCC__)
return CUDART_VERSION;
#else
int version = 0;
#if HIP_VERSION_MAJOR != 0
// Create a convention similar to that of CUDA, as assumed by other
// parts of the code.
version = HIP_VERSION_MINOR;
version += (HIP_VERSION_MAJOR * 100);
#else
hipRuntimeGetVersion(&version);
#endif
return version;
#endif
}
} // namespace d2dev
| 7c30c9525e7574fff1a77f0244cedfc1bea6eab6.cu | // Copyright (c) Facebook, Inc. and its affiliates.
#include <cuda_runtime_api.h>
namespace d2dev {
int get_cudart_version() {
// Not a ROCM platform: Either HIP is not used, or
// it is used, but platform is not ROCM (i.e. it is CUDA)
#if !defined(__HIP_PLATFORM_HCC__)
return CUDART_VERSION;
#else
int version = 0;
#if HIP_VERSION_MAJOR != 0
// Create a convention similar to that of CUDA, as assumed by other
// parts of the code.
version = HIP_VERSION_MINOR;
version += (HIP_VERSION_MAJOR * 100);
#else
hipRuntimeGetVersion(&version);
#endif
return version;
#endif
}
} // namespace d2dev
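A decoding sketch for the value returned above: CUDART_VERSION packs major*1000 + minor*10, while the ROCm branch in this file packs major*100 + minor, so the caller must know which platform produced the number (the on_rocm flag is an assumption).
#include <cstdio>
void print_runtime_version(int v, bool on_rocm)
{
  int major = on_rocm ? v / 100 : v / 1000;
  int minor = on_rocm ? v % 100 : (v % 1000) / 10;
  printf("runtime version %d.%d\n", major, minor);  // e.g. 11040 -> 11.4 on CUDA
}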
|
a2c88b4f76b3834f495a0ff02bf9bc67b67cc7df.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/sorting.hpp>
#include <cudf/detail/stream_compaction.hpp>
#include <cudf/stream_compaction.hpp>
#include <cudf/table/row_operators.cuh>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <thrust/logical.h>
#include <algorithm>
#include <cmath>
namespace cudf {
namespace detail {
/*
* unique_copy copies elements from the range [first, last) to a range beginning
* with output, except that in a consecutive group of duplicate elements only
* depending on last argument keep, only the first one is copied, or the last
* one is copied or neither is copied. The return value is the end of the range
* to which the elements are copied.
*/
template <typename Exec, typename InputIterator, typename OutputIterator, typename BinaryPredicate>
OutputIterator unique_copy(Exec&& exec,
InputIterator first,
InputIterator last,
OutputIterator output,
BinaryPredicate comp,
const duplicate_keep_option keep)
{
size_type last_index = thrust::distance(first, last) - 1;
if (keep == duplicate_keep_option::KEEP_NONE) {
return thrust::copy_if(exec,
first,
last,
thrust::counting_iterator<size_type>(0),
output,
[first, comp, last_index] __device__(size_type i) {
return (i == 0 || !comp(first[i], first[i - 1])) &&
(i == last_index || !comp(first[i], first[i + 1]));
});
} else {
size_type offset = 1;
if (keep == duplicate_keep_option::KEEP_FIRST) {
last_index = 0;
offset = -1;
}
return thrust::copy_if(exec,
first,
last,
thrust::counting_iterator<size_type>(0),
output,
[first, comp, last_index, offset] __device__(size_type i) {
return (i == last_index || !comp(first[i], first[i + offset]));
});
}
}
/**
* @brief Create a column_view of index values which represent the row values
* without duplicates as per @p `keep`
*
* Given a `keys` table_view, each row index is copied to output `unique_indices`, if the
* corresponding row of `keys` table_view is unique, where the definition of unique depends on the
* value of @p keep:
* - KEEP_FIRST: only the first of a sequence of duplicate rows is copied
* - KEEP_LAST: only the last of a sequence of duplicate rows is copied
* - KEEP_NONE: only unique rows are kept
*
* @param[in] keys table_view to identify duplicate rows
* @param[out] unique_indices Column to store the index with unique rows
* @param[in] keep keep first entry, last entry, or no entries if duplicates found
* @param[in] nulls_equal flag to denote nulls are equal if null_equality::EQUAL,
* nulls are not equal if null_equality::UNEQUAL
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*
* @return column_view column_view of unique row index as per specified `keep`, this is actually
* slice of `unique_indices`.
*/
column_view get_unique_ordered_indices(cudf::table_view const& keys,
cudf::mutable_column_view& unique_indices,
duplicate_keep_option keep,
null_equality nulls_equal,
hipStream_t stream = 0)
{
// sort only indices
auto sorted_indices = sorted_order(
keys, std::vector<order>{}, std::vector<null_order>{}, rmm::mr::get_default_resource(), stream);
// extract unique indices
auto device_input_table = cudf::table_device_view::create(keys, stream);
if (cudf::has_nulls(keys)) {
auto comp = row_equality_comparator<true>(
*device_input_table, *device_input_table, nulls_equal == null_equality::EQUAL);
auto result_end = unique_copy(rmm::exec_policy(stream)->on(stream),
sorted_indices->view().begin<cudf::size_type>(),
sorted_indices->view().end<cudf::size_type>(),
unique_indices.begin<cudf::size_type>(),
comp,
keep);
return cudf::detail::slice(
column_view(unique_indices),
0,
thrust::distance(unique_indices.begin<cudf::size_type>(), result_end));
} else {
auto comp = row_equality_comparator<false>(
*device_input_table, *device_input_table, nulls_equal == null_equality::EQUAL);
auto result_end = unique_copy(rmm::exec_policy(stream)->on(stream),
sorted_indices->view().begin<cudf::size_type>(),
sorted_indices->view().end<cudf::size_type>(),
unique_indices.begin<cudf::size_type>(),
comp,
keep);
return cudf::detail::slice(
column_view(unique_indices),
0,
thrust::distance(unique_indices.begin<cudf::size_type>(), result_end));
}
}
cudf::size_type unique_count(table_view const& keys,
null_equality nulls_equal = null_equality::EQUAL,
hipStream_t stream = 0)
{
// sort only indices
auto sorted_indices = sorted_order(
keys, std::vector<order>{}, std::vector<null_order>{}, rmm::mr::get_default_resource(), stream);
// count unique elements
auto sorted_row_index = sorted_indices->view().data<cudf::size_type>();
auto device_input_table = cudf::table_device_view::create(keys, stream);
if (cudf::has_nulls(keys)) {
row_equality_comparator<true> comp(
*device_input_table, *device_input_table, nulls_equal == null_equality::EQUAL);
return thrust::count_if(
rmm::exec_policy(stream)->on(stream),
thrust::counting_iterator<cudf::size_type>(0),
thrust::counting_iterator<cudf::size_type>(keys.num_rows()),
[sorted_row_index, comp] __device__(cudf::size_type i) {
return (i == 0 || not comp(sorted_row_index[i], sorted_row_index[i - 1]));
});
} else {
row_equality_comparator<false> comp(
*device_input_table, *device_input_table, nulls_equal == null_equality::EQUAL);
return thrust::count_if(
rmm::exec_policy(stream)->on(stream),
thrust::counting_iterator<cudf::size_type>(0),
thrust::counting_iterator<cudf::size_type>(keys.num_rows()),
[sorted_row_index, comp] __device__(cudf::size_type i) {
return (i == 0 || not comp(sorted_row_index[i], sorted_row_index[i - 1]));
});
}
}
std::unique_ptr<table> drop_duplicates(table_view const& input,
std::vector<size_type> const& keys,
duplicate_keep_option keep,
null_equality nulls_equal,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
if (0 == input.num_rows() || 0 == input.num_columns() || 0 == keys.size()) {
return empty_like(input);
}
auto keys_view = input.select(keys);
// The values will be filled into this column
auto unique_indices = cudf::make_numeric_column(
data_type{INT32}, keys_view.num_rows(), mask_state::UNALLOCATED, stream);
auto mutable_unique_indices_view = unique_indices->mutable_view();
// This is just slice of `unique_indices` but with different size as per the
// keys_view has been processed in `get_unique_ordered_indices`
auto unique_indices_view = detail::get_unique_ordered_indices(
keys_view, mutable_unique_indices_view, keep, nulls_equal, stream);
// run gather operation to establish new order
return detail::gather(input,
unique_indices_view,
detail::out_of_bounds_policy::NULLIFY,
detail::negative_index_policy::NOT_ALLOWED,
mr,
stream);
}
/**
* @brief Functor to check for `NAN` at an index in a `column_device_view`.
*
* @tparam T The type of `column_device_view`
*/
template <typename T>
struct check_for_nan {
  /**
* @brief Construct from a column_device_view.
*
* @param[in] input The `column_device_view`
*/
check_for_nan(cudf::column_device_view input) : _input{input} {}
/**
* @brief Operator to be called to check for `NAN` at `index` in `_input`
*
* @param[in] index The index at which the `NAN` needs to be checked in `input`
*
* @returns bool true if value at `index` is `NAN` and not null, else false
*/
__device__ bool operator()(size_type index)
{
return std::isnan(_input.data<T>()[index]) and _input.is_valid(index);
}
protected:
cudf::column_device_view _input;
};
/**
* @brief A structure to be used along with type_dispatcher to check if a
* `column_view` has `NAN`.
*/
struct has_nans {
/**
* @brief Checks if `input` has `NAN`
*
* @note This will be applicable only for floating point type columns.
*
* @param[in] input The `column_view` which will be checked for `NAN`
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*
* @returns bool true if `input` has `NAN` else false
*/
template <typename T, std::enable_if_t<std::is_floating_point<T>::value>* = nullptr>
bool operator()(column_view const& input, hipStream_t stream)
{
auto input_device_view = cudf::column_device_view::create(input, stream);
auto device_view = *input_device_view;
auto count = thrust::count_if(rmm::exec_policy(stream)->on(stream),
thrust::counting_iterator<cudf::size_type>(0),
thrust::counting_iterator<cudf::size_type>(input.size()),
check_for_nan<T>(device_view));
return count > 0;
}
/**
* @brief Checks if `input` has `NAN`
*
* @note This will be applicable only for non-floating point type columns. And
* non-floating point columns can never have `NAN`, so it will always return
* false
*
* @param[in] input The `column_view` which will be checked for `NAN`
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*
* @returns bool Always false as non-floating point columns can't have `NAN`
*/
template <typename T, std::enable_if_t<not std::is_floating_point<T>::value>* = nullptr>
bool operator()(column_view const& input, hipStream_t stream)
{
return false;
}
};
cudf::size_type unique_count(column_view const& input,
null_policy null_handling,
nan_policy nan_handling,
hipStream_t stream)
{
if (0 == input.size() || input.null_count() == input.size()) { return 0; }
cudf::size_type nrows = input.size();
bool has_nan = false;
// Check for Nans
// Checking for nulls in input and flag nan_handling, as the count will
  // only get affected if these two conditions are true. NAN will only be
  // an extra if nan_handling was NAN_IS_NULL and input also had null, which
// will increase the count by 1.
if (input.has_nulls() and nan_handling == nan_policy::NAN_IS_NULL) {
has_nan = cudf::type_dispatcher(input.type(), has_nans{}, input, stream);
}
auto count = detail::unique_count(table_view{{input}}, null_equality::EQUAL, stream);
// if nan is considered null and there are already null values
if (nan_handling == nan_policy::NAN_IS_NULL and has_nan and input.has_nulls()) --count;
if (null_handling == null_policy::EXCLUDE and input.has_nulls())
return --count;
else
return count;
}
} // namespace detail
std::unique_ptr<table> drop_duplicates(table_view const& input,
std::vector<size_type> const& keys,
duplicate_keep_option const keep,
null_equality nulls_equal,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::drop_duplicates(input, keys, keep, nulls_equal, mr);
}
cudf::size_type unique_count(column_view const& input,
null_policy null_handling,
nan_policy nan_handling)
{
CUDF_FUNC_RANGE();
return detail::unique_count(input, null_handling, nan_handling);
}
} // namespace cudf
| a2c88b4f76b3834f495a0ff02bf9bc67b67cc7df.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/sorting.hpp>
#include <cudf/detail/stream_compaction.hpp>
#include <cudf/stream_compaction.hpp>
#include <cudf/table/row_operators.cuh>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <thrust/logical.h>
#include <algorithm>
#include <cmath>
namespace cudf {
namespace detail {
/*
* unique_copy copies elements from the range [first, last) to a range beginning
* with output, except that in a consecutive group of duplicate elements only
* depending on last argument keep, only the first one is copied, or the last
* one is copied or neither is copied. The return value is the end of the range
* to which the elements are copied.
*/
template <typename Exec, typename InputIterator, typename OutputIterator, typename BinaryPredicate>
OutputIterator unique_copy(Exec&& exec,
InputIterator first,
InputIterator last,
OutputIterator output,
BinaryPredicate comp,
const duplicate_keep_option keep)
{
size_type last_index = thrust::distance(first, last) - 1;
if (keep == duplicate_keep_option::KEEP_NONE) {
return thrust::copy_if(exec,
first,
last,
thrust::counting_iterator<size_type>(0),
output,
[first, comp, last_index] __device__(size_type i) {
return (i == 0 || !comp(first[i], first[i - 1])) &&
(i == last_index || !comp(first[i], first[i + 1]));
});
} else {
size_type offset = 1;
if (keep == duplicate_keep_option::KEEP_FIRST) {
last_index = 0;
offset = -1;
}
return thrust::copy_if(exec,
first,
last,
thrust::counting_iterator<size_type>(0),
output,
[first, comp, last_index, offset] __device__(size_type i) {
return (i == last_index || !comp(first[i], first[i + offset]));
});
}
}
/**
* @brief Create a column_view of index values which represent the row values
* without duplicates as per @p `keep`
*
* Given a `keys` table_view, each row index is copied to output `unique_indices`, if the
* corresponding row of `keys` table_view is unique, where the definition of unique depends on the
* value of @p keep:
* - KEEP_FIRST: only the first of a sequence of duplicate rows is copied
* - KEEP_LAST: only the last of a sequence of duplicate rows is copied
* - KEEP_NONE: only unique rows are kept
*
* @param[in] keys table_view to identify duplicate rows
* @param[out] unique_indices Column to store the index with unique rows
* @param[in] keep keep first entry, last entry, or no entries if duplicates found
* @param[in] nulls_equal flag to denote nulls are equal if null_equality::EQUAL,
* nulls are not equal if null_equality::UNEQUAL
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*
* @return column_view column_view of unique row index as per specified `keep`, this is actually
* slice of `unique_indices`.
*/
column_view get_unique_ordered_indices(cudf::table_view const& keys,
cudf::mutable_column_view& unique_indices,
duplicate_keep_option keep,
null_equality nulls_equal,
cudaStream_t stream = 0)
{
// sort only indices
auto sorted_indices = sorted_order(
keys, std::vector<order>{}, std::vector<null_order>{}, rmm::mr::get_default_resource(), stream);
// extract unique indices
auto device_input_table = cudf::table_device_view::create(keys, stream);
if (cudf::has_nulls(keys)) {
auto comp = row_equality_comparator<true>(
*device_input_table, *device_input_table, nulls_equal == null_equality::EQUAL);
auto result_end = unique_copy(rmm::exec_policy(stream)->on(stream),
sorted_indices->view().begin<cudf::size_type>(),
sorted_indices->view().end<cudf::size_type>(),
unique_indices.begin<cudf::size_type>(),
comp,
keep);
return cudf::detail::slice(
column_view(unique_indices),
0,
thrust::distance(unique_indices.begin<cudf::size_type>(), result_end));
} else {
auto comp = row_equality_comparator<false>(
*device_input_table, *device_input_table, nulls_equal == null_equality::EQUAL);
auto result_end = unique_copy(rmm::exec_policy(stream)->on(stream),
sorted_indices->view().begin<cudf::size_type>(),
sorted_indices->view().end<cudf::size_type>(),
unique_indices.begin<cudf::size_type>(),
comp,
keep);
return cudf::detail::slice(
column_view(unique_indices),
0,
thrust::distance(unique_indices.begin<cudf::size_type>(), result_end));
}
}
cudf::size_type unique_count(table_view const& keys,
null_equality nulls_equal = null_equality::EQUAL,
cudaStream_t stream = 0)
{
// sort only indices
auto sorted_indices = sorted_order(
keys, std::vector<order>{}, std::vector<null_order>{}, rmm::mr::get_default_resource(), stream);
// count unique elements
auto sorted_row_index = sorted_indices->view().data<cudf::size_type>();
auto device_input_table = cudf::table_device_view::create(keys, stream);
if (cudf::has_nulls(keys)) {
row_equality_comparator<true> comp(
*device_input_table, *device_input_table, nulls_equal == null_equality::EQUAL);
return thrust::count_if(
rmm::exec_policy(stream)->on(stream),
thrust::counting_iterator<cudf::size_type>(0),
thrust::counting_iterator<cudf::size_type>(keys.num_rows()),
[sorted_row_index, comp] __device__(cudf::size_type i) {
return (i == 0 || not comp(sorted_row_index[i], sorted_row_index[i - 1]));
});
} else {
row_equality_comparator<false> comp(
*device_input_table, *device_input_table, nulls_equal == null_equality::EQUAL);
return thrust::count_if(
rmm::exec_policy(stream)->on(stream),
thrust::counting_iterator<cudf::size_type>(0),
thrust::counting_iterator<cudf::size_type>(keys.num_rows()),
[sorted_row_index, comp] __device__(cudf::size_type i) {
return (i == 0 || not comp(sorted_row_index[i], sorted_row_index[i - 1]));
});
}
}
std::unique_ptr<table> drop_duplicates(table_view const& input,
std::vector<size_type> const& keys,
duplicate_keep_option keep,
null_equality nulls_equal,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
if (0 == input.num_rows() || 0 == input.num_columns() || 0 == keys.size()) {
return empty_like(input);
}
auto keys_view = input.select(keys);
// The values will be filled into this column
auto unique_indices = cudf::make_numeric_column(
data_type{INT32}, keys_view.num_rows(), mask_state::UNALLOCATED, stream);
auto mutable_unique_indices_view = unique_indices->mutable_view();
// This is just slice of `unique_indices` but with different size as per the
// keys_view has been processed in `get_unique_ordered_indices`
auto unique_indices_view = detail::get_unique_ordered_indices(
keys_view, mutable_unique_indices_view, keep, nulls_equal, stream);
// run gather operation to establish new order
return detail::gather(input,
unique_indices_view,
detail::out_of_bounds_policy::NULLIFY,
detail::negative_index_policy::NOT_ALLOWED,
mr,
stream);
}
/**
* @brief Functor to check for `NAN` at an index in a `column_device_view`.
*
* @tparam T The type of `column_device_view`
*/
template <typename T>
struct check_for_nan {
  /**
* @brief Construct from a column_device_view.
*
* @param[in] input The `column_device_view`
*/
check_for_nan(cudf::column_device_view input) : _input{input} {}
/**
* @brief Operator to be called to check for `NAN` at `index` in `_input`
*
* @param[in] index The index at which the `NAN` needs to be checked in `input`
*
* @returns bool true if value at `index` is `NAN` and not null, else false
*/
__device__ bool operator()(size_type index)
{
return std::isnan(_input.data<T>()[index]) and _input.is_valid(index);
}
protected:
cudf::column_device_view _input;
};
/**
* @brief A structure to be used along with type_dispatcher to check if a
* `column_view` has `NAN`.
*/
struct has_nans {
/**
* @brief Checks if `input` has `NAN`
*
* @note This will be applicable only for floating point type columns.
*
* @param[in] input The `column_view` which will be checked for `NAN`
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*
* @returns bool true if `input` has `NAN` else false
*/
template <typename T, std::enable_if_t<std::is_floating_point<T>::value>* = nullptr>
bool operator()(column_view const& input, cudaStream_t stream)
{
auto input_device_view = cudf::column_device_view::create(input, stream);
auto device_view = *input_device_view;
auto count = thrust::count_if(rmm::exec_policy(stream)->on(stream),
thrust::counting_iterator<cudf::size_type>(0),
thrust::counting_iterator<cudf::size_type>(input.size()),
check_for_nan<T>(device_view));
return count > 0;
}
/**
* @brief Checks if `input` has `NAN`
*
* @note This will be applicable only for non-floating point type columns. And
* non-floating point columns can never have `NAN`, so it will always return
* false
*
* @param[in] input The `column_view` which will be checked for `NAN`
* @param[in] stream CUDA stream used for device memory operations and kernel launches.
*
* @returns bool Always false as non-floating point columns can't have `NAN`
*/
template <typename T, std::enable_if_t<not std::is_floating_point<T>::value>* = nullptr>
bool operator()(column_view const& input, cudaStream_t stream)
{
return false;
}
};
cudf::size_type unique_count(column_view const& input,
null_policy null_handling,
nan_policy nan_handling,
cudaStream_t stream)
{
if (0 == input.size() || input.null_count() == input.size()) { return 0; }
cudf::size_type nrows = input.size();
bool has_nan = false;
// Check for Nans
// Checking for nulls in input and flag nan_handling, as the count will
  // only get affected if these two conditions are true. NAN will only be
  // an extra if nan_handling was NAN_IS_NULL and input also had null, which
// will increase the count by 1.
if (input.has_nulls() and nan_handling == nan_policy::NAN_IS_NULL) {
has_nan = cudf::type_dispatcher(input.type(), has_nans{}, input, stream);
}
auto count = detail::unique_count(table_view{{input}}, null_equality::EQUAL, stream);
// if nan is considered null and there are already null values
if (nan_handling == nan_policy::NAN_IS_NULL and has_nan and input.has_nulls()) --count;
if (null_handling == null_policy::EXCLUDE and input.has_nulls())
return --count;
else
return count;
}
} // namespace detail
std::unique_ptr<table> drop_duplicates(table_view const& input,
std::vector<size_type> const& keys,
duplicate_keep_option const keep,
null_equality nulls_equal,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::drop_duplicates(input, keys, keep, nulls_equal, mr);
}
cudf::size_type unique_count(column_view const& input,
null_policy null_handling,
nan_policy nan_handling)
{
CUDF_FUNC_RANGE();
return detail::unique_count(input, null_handling, nan_handling);
}
} // namespace cudf
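A plain host-side analogue (illustration only, not cudf API) of the index predicate used in unique_copy above, specialized to an already-sorted integer array:
// KEEP_FIRST keeps index i when a[i] differs from its predecessor, KEEP_LAST when it
// differs from its successor, KEEP_NONE only when it differs from both neighbours.
enum class keep_option { first, last, none };
bool keep_index(const int *a, int n, int i, keep_option keep)
{
  bool diff_prev = (i == 0) || a[i] != a[i - 1];
  bool diff_next = (i == n - 1) || a[i] != a[i + 1];
  switch (keep) {
    case keep_option::first: return diff_prev;
    case keep_option::last: return diff_next;
    default: return diff_prev && diff_next;
  }
}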
|
8986c32efa285999cde4cbffa2bbf8cc6e04ab93.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include <thrust/for_each.h>
typedef unsigned int uint;
typedef unsigned char uchar;
void checkCudaError(hipError_t error, const char* const filename, const int linenum)
{
if(error != hipSuccess){
fprintf(stderr, "File %s, line %d, CUDA error: %s\n", filename, linenum, hipGetErrorString(error));
exit(-1);
}
}
#define CHECK_CUDA_ERROR(error) checkCudaError(error, __FILE__, __LINE__)
///////////////////////////////////////////////////////
// CPU methods
void histOnCPU(const uchar* const src, uint* hist, const int N)
{
int i = 0;
for(; i < N; ++i){
hist[src[i]]++;
}
}
bool checkCorrectness(const uint* hist1, const uint* hist2, const int N)
{
return (memcmp(hist1, hist2, sizeof(uint) * N) == 0) ? true : false;
}
/////////////////////////////////////////////////////
__global__ void histKernel_1(const uchar* src, uint* hist, int N)
{
int index = blockIdx.x*blockDim.x+threadIdx.x;
if(index >= N)
return;
const uchar val = src[index];
atomicAdd(&hist[val], 1);
}
// each thread loads 4 bytes, so one warp reads 32 x 4 = 128 bytes at a time
__global__ void histKernel_2(const uchar* src, uint* hist, int N)
{
int index = (blockIdx.x*blockDim.x+threadIdx.x)*4;
if(index >= N)
return;
uchar val[4];
val[0] = src[index];
val[1] = src[index+1];
val[2] = src[index+2];
val[3] = src[index+3];
atomicAdd(&hist[val[0]], 1);
atomicAdd(&hist[val[1]], 1);
atomicAdd(&hist[val[2]], 1);
atomicAdd(&hist[val[3]], 1);
}
//using shared memory (the per-block histogram is declared inside the kernel)
__global__ void histKernel_3(const uchar* src, uint* hist, int N)
{
    __shared__ uint histTmp[256];
int index = (blockIdx.x*blockDim.x+threadIdx.x)*4;
if(index >= N)
return;
histTmp[threadIdx.x] = 0;
uchar val[4];
val[0] = src[index];
val[1] = src[index+1];
val[2] = src[index+2];
val[3] = src[index+3];
__syncthreads();
atomicAdd(&histTmp[val[0]], 1);
atomicAdd(&histTmp[val[1]], 1);
atomicAdd(&histTmp[val[2]], 1);
atomicAdd(&histTmp[val[3]], 1);
__syncthreads();
atomicAdd(&hist[threadIdx.x], histTmp[threadIdx.x]);
}
void computeHist(const uchar* src, uint* hist, int N)
{
const int threadPerBlock = 256;
const int nByteSrc = sizeof(uchar)*N;
const int nByteHist = sizeof(uint)*256;
uchar* dev_src;
uint* dev_hist;
CHECK_CUDA_ERROR(hipMalloc((void**)&dev_src, nByteSrc));
CHECK_CUDA_ERROR(hipMalloc((void**)&dev_hist, nByteHist));
CHECK_CUDA_ERROR(hipMemcpy(dev_src, src, nByteSrc, hipMemcpyHostToDevice));
CHECK_CUDA_ERROR(hipMemset(dev_hist, 0, nByteHist));
// histKernel_1<<<(N+threadPerBlock-1)/threadPerBlock, threadPerBlock>>>(dev_src, dev_hist, N);
hipLaunchKernelGGL(( histKernel_2), dim3((N+4*threadPerBlock-1)/(4*threadPerBlock)), dim3(threadPerBlock), 0, 0, dev_src, dev_hist, N);
// histKernel_3<<<(N+threadPerBlock-1)/threadPerBlock, threadPerBlock>>>(dev_src, dev_hist, N);
CHECK_CUDA_ERROR(hipMemcpy(hist, dev_hist, nByteHist, hipMemcpyDeviceToHost));
CHECK_CUDA_ERROR(hipFree(dev_src));
CHECK_CUDA_ERROR(hipFree(dev_hist));
uint* histCPU = (uint*)malloc(nByteHist);
memset(histCPU, 0, 256*sizeof(int));
histOnCPU(src, histCPU, N);
if(checkCorrectness(hist, histCPU, 256))
printf("Correct\n");
else
printf("Error\n");
}
void randomFillArray(uchar* src, int N)
{
srand(time(NULL));
for(int i = 0; i < N; ++i)
src[i] = (rand()%256);
}
int main()
{
const int N = 256;
const int nByte = sizeof(uchar)*N;
uchar* src = (uchar*)malloc(nByte);
uint* hist = (uint*)malloc(256*sizeof(uint));
randomFillArray(src, N);
computeHist(src, hist, N);
free(src);
free(hist);
return 0;
}
| 8986c32efa285999cde4cbffa2bbf8cc6e04ab93.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <cuda.h>
#include <thrust/for_each.h>
typedef unsigned int uint;
typedef unsigned char uchar;
void checkCudaError(cudaError_t error, const char* const filename, const int linenum)
{
if(error != cudaSuccess){
fprintf(stderr, "File %s, line %d, CUDA error: %s\n", filename, linenum, cudaGetErrorString(error));
exit(-1);
}
}
#define CHECK_CUDA_ERROR(error) checkCudaError(error, __FILE__, __LINE__)
///////////////////////////////////////////////////////
// CPU methods
void histOnCPU(const uchar* const src, uint* hist, const int N)
{
int i = 0;
for(; i < N; ++i){
hist[src[i]]++;
}
}
bool checkCorrectness(const uint* hist1, const uint* hist2, const int N)
{
return (memcmp(hist1, hist2, sizeof(uint) * N) == 0) ? true : false;
}
/////////////////////////////////////////////////////
__global__ void histKernel_1(const uchar* src, uint* hist, int N)
{
int index = blockIdx.x*blockDim.x+threadIdx.x;
if(index >= N)
return;
const uchar val = src[index];
atomicAdd(&hist[val], 1);
}
// each thread loads 4 bytes, so one warp reads 32 x 4 = 128 bytes at a time
__global__ void histKernel_2(const uchar* src, uint* hist, int N)
{
int index = (blockIdx.x*blockDim.x+threadIdx.x)*4;
if(index >= N)
return;
uchar val[4];
val[0] = src[index];
val[1] = src[index+1];
val[2] = src[index+2];
val[3] = src[index+3];
atomicAdd(&hist[val[0]], 1);
atomicAdd(&hist[val[1]], 1);
atomicAdd(&hist[val[2]], 1);
atomicAdd(&hist[val[3]], 1);
}
//using shared memory (the per-block histogram is declared inside the kernel)
__global__ void histKernel_3(const uchar* src, uint* hist, int N)
{
    __shared__ uint histTmp[256];
int index = (blockIdx.x*blockDim.x+threadIdx.x)*4;
if(index >= N)
return;
histTmp[threadIdx.x] = 0;
uchar val[4];
val[0] = src[index];
val[1] = src[index+1];
val[2] = src[index+2];
val[3] = src[index+3];
__syncthreads();
atomicAdd(&histTmp[val[0]], 1);
atomicAdd(&histTmp[val[1]], 1);
atomicAdd(&histTmp[val[2]], 1);
atomicAdd(&histTmp[val[3]], 1);
__syncthreads();
atomicAdd(&hist[threadIdx.x], histTmp[threadIdx.x]);
}
void computeHist(const uchar* src, uint* hist, int N)
{
const int threadPerBlock = 256;
const int nByteSrc = sizeof(uchar)*N;
const int nByteHist = sizeof(uint)*256;
uchar* dev_src;
uint* dev_hist;
CHECK_CUDA_ERROR(cudaMalloc((void**)&dev_src, nByteSrc));
CHECK_CUDA_ERROR(cudaMalloc((void**)&dev_hist, nByteHist));
CHECK_CUDA_ERROR(cudaMemcpy(dev_src, src, nByteSrc, cudaMemcpyHostToDevice));
CHECK_CUDA_ERROR(cudaMemset(dev_hist, 0, nByteHist));
// histKernel_1<<<(N+threadPerBlock-1)/threadPerBlock, threadPerBlock>>>(dev_src, dev_hist, N);
histKernel_2<<<(N+4*threadPerBlock-1)/(4*threadPerBlock), threadPerBlock>>>(dev_src, dev_hist, N);
// histKernel_3<<<(N+threadPerBlock-1)/threadPerBlock, threadPerBlock>>>(dev_src, dev_hist, N);
CHECK_CUDA_ERROR(cudaMemcpy(hist, dev_hist, nByteHist, cudaMemcpyDeviceToHost));
CHECK_CUDA_ERROR(cudaFree(dev_src));
CHECK_CUDA_ERROR(cudaFree(dev_hist));
uint* histCPU = (uint*)malloc(nByteHist);
memset(histCPU, 0, 256*sizeof(int));
histOnCPU(src, histCPU, N);
if(checkCorrectness(hist, histCPU, 256))
printf("Correct\n");
else
printf("Error\n");
}
void randomFillArray(uchar* src, int N)
{
srand(time(NULL));
for(int i = 0; i < N; ++i)
src[i] = (rand()%256);
}
int main()
{
const int N = 256;
const int nByte = sizeof(uchar)*N;
uchar* src = (uchar*)malloc(nByte);
uint* hist = (uint*)malloc(256*sizeof(uint));
randomFillArray(src, N);
computeHist(src, hist, N);
free(src);
free(hist);
return 0;
}
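A hedged variant of histKernel_3 that removes the blockDim == 256 assumption and the early return before __syncthreads; it reuses the file's uchar/uint typedefs and is a sketch, not a drop-in replacement for the kernels above.
__global__ void histKernel_shared(const uchar* src, uint* hist, int N)
{
    __shared__ uint histTmp[256];
    for (int b = threadIdx.x; b < 256; b += blockDim.x)   // zero with any block size
        histTmp[b] = 0;
    __syncthreads();
    for (int i = blockIdx.x*blockDim.x + threadIdx.x; i < N; i += blockDim.x*gridDim.x)
        atomicAdd(&histTmp[src[i]], 1);                   // grid-stride loop: no early return
    __syncthreads();
    for (int b = threadIdx.x; b < 256; b += blockDim.x)   // merge per-block counts
        atomicAdd(&hist[b], histTmp[b]);
}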
|
87d794d79a6c1c425c0cbd27d7433a43a7ba9c9e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SoftmaxFull2ForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* target_data, Dtype* loss) {
CUDA_KERNEL_LOOP(index, nthreads) {
loss[index] = -target_data[index] * log(max(prob_data[index], Dtype(FLT_MIN)));
}
}
template <typename Dtype>
void SoftmaxFullLoss2Layer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* target_data = bottom[1]->gpu_data();
const int nthreads = prob_.count();
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
hipLaunchKernelGGL(( SoftmaxFull2ForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, target_data, loss_data);
Dtype loss;
for (int i = 0; i < outer_num_; i++) {
caffe_gpu_asum(inner_num_, loss_data + i * inner_num_, &loss);
if (normalize_) {
loss /= prob_.count();
} else {
loss /= outer_num_;
}
top[0]->mutable_cpu_data()[i] = loss;
}
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
void SoftmaxFullLoss2Layer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to target probs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
const Dtype* target_probs = bottom[1]->gpu_data();
caffe_gpu_sub(prob_.count(), prob_data, target_probs, bottom_diff);
for (int i = 0; i < outer_num_; i++) {
const Dtype loss_weight = top[0]->cpu_diff()[i];
Dtype mult = 0;
if (normalize_) {
mult = loss_weight / prob_.count();
} else {
mult = loss_weight / outer_num_;
}
caffe_gpu_scal(inner_num_, mult, bottom_diff + i * inner_num_);
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxFullLoss2Layer);
} // namespace caffe
| 87d794d79a6c1c425c0cbd27d7433a43a7ba9c9e.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SoftmaxFull2ForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* target_data, Dtype* loss) {
CUDA_KERNEL_LOOP(index, nthreads) {
loss[index] = -target_data[index] * log(max(prob_data[index], Dtype(FLT_MIN)));
}
}
template <typename Dtype>
void SoftmaxFullLoss2Layer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* target_data = bottom[1]->gpu_data();
const int nthreads = prob_.count();
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
SoftmaxFull2ForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, target_data, loss_data);
Dtype loss;
for (int i = 0; i < outer_num_; i++) {
caffe_gpu_asum(inner_num_, loss_data + i * inner_num_, &loss);
if (normalize_) {
loss /= prob_.count();
} else {
loss /= outer_num_;
}
top[0]->mutable_cpu_data()[i] = loss;
}
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
void SoftmaxFullLoss2Layer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to target probs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
const Dtype* target_probs = bottom[1]->gpu_data();
caffe_gpu_sub(prob_.count(), prob_data, target_probs, bottom_diff);
for (int i = 0; i < outer_num_; i++) {
const Dtype loss_weight = top[0]->cpu_diff()[i];
Dtype mult = 0;
if (normalize_) {
mult = loss_weight / prob_.count();
} else {
mult = loss_weight / outer_num_;
}
caffe_gpu_scal(inner_num_, mult, bottom_diff + i * inner_num_);
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxFullLoss2Layer);
} // namespace caffe
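Backward_gpu above relies on the standard identity that, for softmax followed by cross-entropy against soft targets that sum to one, the gradient with respect to the logits is prob - target. A standalone sketch of that elementwise step (the kernel name is illustrative; the scale mirrors the normalize_ branch above):
template <typename Dtype>
__global__ void SoftmaxGradKernel(const int n, const Dtype* prob,
    const Dtype* target, const Dtype scale, Dtype* diff) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    diff[i] = (prob[i] - target[i]) * scale;  // same result as caffe_gpu_sub + caffe_gpu_scal
  }
}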
|
9eea6e225c74e8f60008cb97951858cee6d4cf01.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
/*
* This copy of code is a derivative designed for educational purposes
* and it contains source code provided by NVIDIA Corporation.
*
*/
#include "hip/hip_runtime.h"
#include "../common/book.h"
#include "../common/cpu_bitmap.h"
#define DIM 1024
#define rnd( x ) (x * rand() / RAND_MAX)
#define INF 2e10f
struct Sphere {
float r,b,g;// color of the sphere
float radius;
float x,y,z;// coordinate of the center
// will return the distance between imaginary camera and hit
__device__ float hit( float ox, float oy, float *n ) {
float dx = ox - x; // distance on x-axis
float dy = oy - y; // distance on y-axis
// if (dx*dx + dy*dy > radius*radius), ray will not hit sphere
if (dx*dx + dy*dy < radius*radius) {
float dz = sqrtf( radius*radius - dx*dx - dy*dy );
// n is used in visual effect
*n = dz / sqrtf( radius * radius );
return dz + z;
}
return -INF;
}
};
#define SPHERES 20
__global__ void kernel( Sphere *s, unsigned char *ptr ) {
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
// this is a linear offset into output buffer
int offset = x + y * blockDim.x * gridDim.x;
// shift the (x,y) image coordinate so that z-axis go through center
float ox = (x - DIM/2);
float oy = (y - DIM/2);
float r=0, g=0, b=0;// set the background to black
float maxz = -INF;
for(int i=0; i<SPHERES; i++) {
float n;
float t = s[i].hit( ox, oy, &n ); // return the distance
if (t > maxz) {
float fscale = n;// improve visual effect
r = s[i].r * fscale;
g = s[i].g * fscale;
b = s[i].b * fscale;
maxz = t; // update maxz everytime a smaller distance is found
}
}
// color the bitmap according to what the ray has 'seen'
ptr[offset*4 + 0] = (int)(r * 255);
ptr[offset*4 + 1] = (int)(g * 255);
ptr[offset*4 + 2] = (int)(b * 255);
ptr[offset*4 + 3] = 255;
}
// globals needed by the update routine
struct DataBlock {
unsigned char *dev_bitmap;
Sphere *s;
};
int main( void ) {
// declare the data block and other needed variables
DataBlock data;
CPUBitmap bitmap( DIM, DIM, &data );
unsigned char *dev_bitmap;
Sphere *s;
// allocate temp memory for the Sphere dataset on CPU
Sphere *temp_s = (Sphere*)malloc( sizeof(Sphere) * SPHERES );
// initialize the Sphere dataset
for (int i=0; i<SPHERES; i++) {
temp_s[i].r = rnd( 1.0f );
temp_s[i].g = rnd( 1.0f );
temp_s[i].b = rnd( 1.0f );
temp_s[i].x = rnd( 1000.0f ) - 500;
temp_s[i].y = rnd( 1000.0f ) - 500;
temp_s[i].z = rnd( 1000.0f ) - 500;
temp_s[i].radius = rnd( 100.0f ) + 20;
}
// capture the start time
hipEvent_t start, stop;
HANDLE_ERROR( hipEventCreate( &start ) );
HANDLE_ERROR( hipEventCreate( &stop ) );
HANDLE_ERROR( hipEventRecord( start, 0 ) );
// allocate memory on the GPU for the output bitmap
HANDLE_ERROR( hipMalloc( (void**)&dev_bitmap, bitmap.image_size() ) );
// allocate memory for the Sphere dataset on GPU
HANDLE_ERROR( hipMalloc( (void**)&s, sizeof(Sphere) * SPHERES ) );
// transfer the initialized Sphere dataset from CPU memory to GPU memory
HANDLE_ERROR( hipMemcpy( s, temp_s, sizeof(Sphere) * SPHERES,
hipMemcpyHostToDevice ) );
// generate a bitmap from our sphere data
dim3 grids(DIM/32,DIM/32);
dim3 threads(32,32);
hipLaunchKernelGGL(( kernel), dim3(grids),dim3(threads), 0, 0, s, dev_bitmap );
// copy our bitmap back from the GPU for display
HANDLE_ERROR( hipMemcpy( bitmap.get_ptr(), dev_bitmap,
bitmap.image_size(),
hipMemcpyDeviceToHost ) );
// get stop time, and display the timing results
HANDLE_ERROR( hipEventRecord( stop, 0 ) );
HANDLE_ERROR( hipEventSynchronize( stop ) );
float elapsedTime;
HANDLE_ERROR( hipEventElapsedTime( &elapsedTime,
start, stop ) );
printf( "Time to generate: %3.1f ms\n", elapsedTime );
// free CPU memory
free( temp_s );
// free GPU memory
HANDLE_ERROR( hipEventDestroy( start ) );
HANDLE_ERROR( hipEventDestroy( stop ) );
HANDLE_ERROR( hipFree( dev_bitmap ) );
HANDLE_ERROR( hipFree( s ) );
// display
bitmap.display_and_exit();
}
| 9eea6e225c74e8f60008cb97951858cee6d4cf01.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
/*
* This copy of code is a derivative designed for educational purposes
* and it contains source code provided by NVIDIA Corporation.
*
*/
#include "cuda.h"
#include "../common/book.h"
#include "../common/cpu_bitmap.h"
#define DIM 1024
#define rnd( x ) (x * rand() / RAND_MAX)
#define INF 2e10f
struct Sphere {
float r,b,g;// color of the sphere
float radius;
float x,y,z;// coordinate of the center
// will return the distance between imaginary camera and hit
__device__ float hit( float ox, float oy, float *n ) {
float dx = ox - x; // distance on x-axis
float dy = oy - y; // distance on y-axis
// if (dx*dx + dy*dy > radius*radius), ray will not hit sphere
if (dx*dx + dy*dy < radius*radius) {
float dz = sqrtf( radius*radius - dx*dx - dy*dy );
// n is used in visual effect
*n = dz / sqrtf( radius * radius );
return dz + z;
}
return -INF;
}
};
#define SPHERES 20
__global__ void kernel( Sphere *s, unsigned char *ptr ) {
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
// this is a linear offset into output buffer
int offset = x + y * blockDim.x * gridDim.x;
// shift the (x,y) image coordinate so that z-axis go through center
float ox = (x - DIM/2);
float oy = (y - DIM/2);
float r=0, g=0, b=0;// set the background to black
float maxz = -INF;
for(int i=0; i<SPHERES; i++) {
float n;
float t = s[i].hit( ox, oy, &n ); // return the distance
if (t > maxz) {
float fscale = n;// improve visual effect
r = s[i].r * fscale;
g = s[i].g * fscale;
b = s[i].b * fscale;
maxz = t; // update maxz everytime a smaller distance is found
}
}
// color the bitmap according to what the ray has 'seen'
ptr[offset*4 + 0] = (int)(r * 255);
ptr[offset*4 + 1] = (int)(g * 255);
ptr[offset*4 + 2] = (int)(b * 255);
ptr[offset*4 + 3] = 255;
}
// globals needed by the update routine
struct DataBlock {
unsigned char *dev_bitmap;
Sphere *s;
};
int main( void ) {
// declare the data block and other needed variables
DataBlock data;
CPUBitmap bitmap( DIM, DIM, &data );
unsigned char *dev_bitmap;
Sphere *s;
// allocate temp memory for the Sphere dataset on CPU
Sphere *temp_s = (Sphere*)malloc( sizeof(Sphere) * SPHERES );
// initialize the Sphere dataset
for (int i=0; i<SPHERES; i++) {
temp_s[i].r = rnd( 1.0f );
temp_s[i].g = rnd( 1.0f );
temp_s[i].b = rnd( 1.0f );
temp_s[i].x = rnd( 1000.0f ) - 500;
temp_s[i].y = rnd( 1000.0f ) - 500;
temp_s[i].z = rnd( 1000.0f ) - 500;
temp_s[i].radius = rnd( 100.0f ) + 20;
}
// capture the start time
cudaEvent_t start, stop;
HANDLE_ERROR( cudaEventCreate( &start ) );
HANDLE_ERROR( cudaEventCreate( &stop ) );
HANDLE_ERROR( cudaEventRecord( start, 0 ) );
// allocate memory on the GPU for the output bitmap
HANDLE_ERROR( cudaMalloc( (void**)&dev_bitmap, bitmap.image_size() ) );
// allocate memory for the Sphere dataset on GPU
HANDLE_ERROR( cudaMalloc( (void**)&s, sizeof(Sphere) * SPHERES ) );
// transfer the initialized Sphere dataset from CPU memory to GPU memory
HANDLE_ERROR( cudaMemcpy( s, temp_s, sizeof(Sphere) * SPHERES,
cudaMemcpyHostToDevice ) );
// generate a bitmap from our sphere data
dim3 grids(DIM/32,DIM/32);
dim3 threads(32,32);
kernel<<<grids,threads>>>( s, dev_bitmap );
// copy our bitmap back from the GPU for display
HANDLE_ERROR( cudaMemcpy( bitmap.get_ptr(), dev_bitmap,
bitmap.image_size(),
cudaMemcpyDeviceToHost ) );
// get stop time, and display the timing results
HANDLE_ERROR( cudaEventRecord( stop, 0 ) );
HANDLE_ERROR( cudaEventSynchronize( stop ) );
float elapsedTime;
HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime,
start, stop ) );
printf( "Time to generate: %3.1f ms\n", elapsedTime );
// free CPU memory
free( temp_s );
// free GPU memory
HANDLE_ERROR( cudaEventDestroy( start ) );
HANDLE_ERROR( cudaEventDestroy( stop ) );
HANDLE_ERROR( cudaFree( dev_bitmap ) );
HANDLE_ERROR( cudaFree( s ) );
// display
bitmap.display_and_exit();
}
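A host-side analogue of Sphere::hit for sanity-checking the intersection math above; it reuses the INF constant defined in this file and is illustrative only.
#include <cmath>
float hit_on_cpu(float cx, float cy, float cz, float radius,
                 float ox, float oy, float *n)
{
    // rays travel along z, so the hit test reduces to a 2D distance check
    float dx = ox - cx;
    float dy = oy - cy;
    if (dx*dx + dy*dy < radius*radius) {
        float dz = sqrtf(radius*radius - dx*dx - dy*dy);
        *n = dz / sqrtf(radius*radius);   // shading factor: 1 at the disc centre, 0 at its edge
        return dz + cz;                   // depth of the intersection
    }
    return -INF;
}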
|
d277db8925c0870eff34a21687992e56626fda0c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* rovernet
*/
#include "cudaYUV.h"
#include "cudaMath.h"
inline __device__ void rgb_to_y(const uint8_t r, const uint8_t g, const uint8_t b, uint8_t& y)
{
y = static_cast<uint8_t>(((int)(30 * r) + (int)(59 * g) + (int)(11 * b)) / 100);
}
inline __device__ void rgb_to_yuv(const uint8_t r, const uint8_t g, const uint8_t b, uint8_t& y, uint8_t& u, uint8_t& v)
{
rgb_to_y(r, g, b, y);
u = static_cast<uint8_t>(((int)(-17 * r) - (int)(33 * g) + (int)(50 * b) + 12800) / 100);
v = static_cast<uint8_t>(((int)(50 * r) - (int)(42 * g) - (int)(8 * b) + 12800) / 100);
}
template <typename T, bool formatYV12>
__global__ void RGB_to_YV12( T* src, int srcAlignedWidth, uint8_t* dst, int dstPitch, int width, int height )
{
const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
const int y = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
const int x1 = x + 1;
const int y1 = y + 1;
if( x1 >= width || y1 >= height )
return;
const int planeSize = height * dstPitch;
uint8_t* y_plane = dst;
uint8_t* u_plane;
uint8_t* v_plane;
if( formatYV12 )
{
u_plane = y_plane + planeSize;
v_plane = u_plane + (planeSize / 4); // size of U & V planes is 25% of Y plane
}
else
{
v_plane = y_plane + planeSize; // in I420, order of U & V planes is reversed
u_plane = v_plane + (planeSize / 4);
}
T px;
uint8_t y_val, u_val, v_val;
px = src[y * srcAlignedWidth + x];
rgb_to_y(px.x, px.y, px.z, y_val);
y_plane[y * dstPitch + x] = y_val;
px = src[y * srcAlignedWidth + x1];
rgb_to_y(px.x, px.y, px.z, y_val);
y_plane[y * dstPitch + x1] = y_val;
px = src[y1 * srcAlignedWidth + x];
rgb_to_y(px.x, px.y, px.z, y_val);
y_plane[y1 * dstPitch + x] = y_val;
px = src[y1 * srcAlignedWidth + x1];
rgb_to_yuv(px.x, px.y, px.z, y_val, u_val, v_val);
y_plane[y1 * dstPitch + x1] = y_val;
const int uvPitch = dstPitch / 2;
const int uvIndex = (y / 2) * uvPitch + (x / 2);
u_plane[uvIndex] = u_val;
v_plane[uvIndex] = v_val;
}
template<typename T, bool formatYV12>
hipError_t launch420( T* input, size_t inputPitch, uint8_t* output, size_t outputPitch, size_t width, size_t height)
{
if( !input || !inputPitch || !output || !outputPitch || !width || !height )
return hipErrorInvalidValue;
const dim3 block(32, 8);
const dim3 grid(iDivUp(width, block.x * 2), iDivUp(height, block.y * 2));
const int inputAlignedWidth = inputPitch / sizeof(T);
hipLaunchKernelGGL(( RGB_to_YV12<T, formatYV12>), dim3(grid), dim3(block), 0, 0, input, inputAlignedWidth, output, outputPitch, width, height);
return CUDA(hipGetLastError());
}
// cudaRGBAToYV12
hipError_t cudaRGBAToYV12( uchar4* input, size_t inputPitch, uint8_t* output, size_t outputPitch, size_t width, size_t height )
{
return launch420<uchar4,false>( input, inputPitch, output, outputPitch, width, height );
}
// cudaRGBAToYV12
hipError_t cudaRGBAToYV12( uchar4* input, uint8_t* output, size_t width, size_t height )
{
return cudaRGBAToYV12( input, width * sizeof(uchar4), output, width * sizeof(uint8_t), width, height );
}
// cudaRGBAToI420
hipError_t cudaRGBAToI420( uchar4* input, size_t inputPitch, uint8_t* output, size_t outputPitch, size_t width, size_t height )
{
return launch420<uchar4,true>( input, inputPitch, output, outputPitch, width, height );
}
// cudaRGBAToI420
hipError_t cudaRGBAToI420( uchar4* input, uint8_t* output, size_t width, size_t height )
{
return cudaRGBAToI420( input, width * sizeof(uchar4), output, width * sizeof(uint8_t), width, height );
}
#if 0
__global__ void Gray_to_YV12(const GlobPtrSz<uint8_t> src, GlobPtr<uint8_t> dst)
{
const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
const int y = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
if (x + 1 >= src.cols || y + 1 >= src.rows)
return;
// get pointers to the data
const size_t planeSize = src.rows * dst.step;
GlobPtr<uint8_t> y_plane = globPtr(dst.data, dst.step);
GlobPtr<uint8_t> u_plane = globPtr(y_plane.data + planeSize, dst.step / 2);
GlobPtr<uint8_t> v_plane = globPtr(u_plane.data + (planeSize / 4), dst.step / 2);
uint8_t pix;
uint8_t y_val, u_val, v_val;
pix = src(y, x);
rgb_to_y(pix, pix, pix, y_val);
y_plane(y, x) = y_val;
pix = src(y, x + 1);
rgb_to_y(pix, pix, pix, y_val);
y_plane(y, x + 1) = y_val;
pix = src(y + 1, x);
rgb_to_y(pix, pix, pix, y_val);
y_plane(y + 1, x) = y_val;
pix = src(y + 1, x + 1);
rgb_to_yuv(pix, pix, pix, y_val, u_val, v_val);
y_plane(y + 1, x + 1) = y_val;
u_plane(y / 2, x / 2) = u_val;
v_plane(y / 2, x / 2) = v_val;
}
#endif
| d277db8925c0870eff34a21687992e56626fda0c.cu | /*
* rovernet
*/
#include "cudaYUV.h"
#include "cudaMath.h"
inline __device__ void rgb_to_y(const uint8_t r, const uint8_t g, const uint8_t b, uint8_t& y)
{
y = static_cast<uint8_t>(((int)(30 * r) + (int)(59 * g) + (int)(11 * b)) / 100);
}
inline __device__ void rgb_to_yuv(const uint8_t r, const uint8_t g, const uint8_t b, uint8_t& y, uint8_t& u, uint8_t& v)
{
rgb_to_y(r, g, b, y);
u = static_cast<uint8_t>(((int)(-17 * r) - (int)(33 * g) + (int)(50 * b) + 12800) / 100);
v = static_cast<uint8_t>(((int)(50 * r) - (int)(42 * g) - (int)(8 * b) + 12800) / 100);
}
template <typename T, bool formatYV12>
__global__ void RGB_to_YV12( T* src, int srcAlignedWidth, uint8_t* dst, int dstPitch, int width, int height )
{
const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
const int y = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
const int x1 = x + 1;
const int y1 = y + 1;
if( x1 >= width || y1 >= height )
return;
const int planeSize = height * dstPitch;
uint8_t* y_plane = dst;
uint8_t* u_plane;
uint8_t* v_plane;
if( formatYV12 )
{
u_plane = y_plane + planeSize;
v_plane = u_plane + (planeSize / 4); // size of U & V planes is 25% of Y plane
}
else
{
v_plane = y_plane + planeSize; // in I420, order of U & V planes is reversed
u_plane = v_plane + (planeSize / 4);
}
T px;
uint8_t y_val, u_val, v_val;
px = src[y * srcAlignedWidth + x];
rgb_to_y(px.x, px.y, px.z, y_val);
y_plane[y * dstPitch + x] = y_val;
px = src[y * srcAlignedWidth + x1];
rgb_to_y(px.x, px.y, px.z, y_val);
y_plane[y * dstPitch + x1] = y_val;
px = src[y1 * srcAlignedWidth + x];
rgb_to_y(px.x, px.y, px.z, y_val);
y_plane[y1 * dstPitch + x] = y_val;
px = src[y1 * srcAlignedWidth + x1];
rgb_to_yuv(px.x, px.y, px.z, y_val, u_val, v_val);
y_plane[y1 * dstPitch + x1] = y_val;
const int uvPitch = dstPitch / 2;
const int uvIndex = (y / 2) * uvPitch + (x / 2);
u_plane[uvIndex] = u_val;
v_plane[uvIndex] = v_val;
}
template<typename T, bool formatYV12>
cudaError_t launch420( T* input, size_t inputPitch, uint8_t* output, size_t outputPitch, size_t width, size_t height)
{
if( !input || !inputPitch || !output || !outputPitch || !width || !height )
return cudaErrorInvalidValue;
const dim3 block(32, 8);
const dim3 grid(iDivUp(width, block.x * 2), iDivUp(height, block.y * 2));
const int inputAlignedWidth = inputPitch / sizeof(T);
RGB_to_YV12<T, formatYV12><<<grid, block>>>(input, inputAlignedWidth, output, outputPitch, width, height);
return CUDA(cudaGetLastError());
}
// cudaRGBAToYV12
cudaError_t cudaRGBAToYV12( uchar4* input, size_t inputPitch, uint8_t* output, size_t outputPitch, size_t width, size_t height )
{
return launch420<uchar4,false>( input, inputPitch, output, outputPitch, width, height );
}
// cudaRGBAToYV12
cudaError_t cudaRGBAToYV12( uchar4* input, uint8_t* output, size_t width, size_t height )
{
return cudaRGBAToYV12( input, width * sizeof(uchar4), output, width * sizeof(uint8_t), width, height );
}
// cudaRGBAToI420
cudaError_t cudaRGBAToI420( uchar4* input, size_t inputPitch, uint8_t* output, size_t outputPitch, size_t width, size_t height )
{
return launch420<uchar4,true>( input, inputPitch, output, outputPitch, width, height );
}
// cudaRGBAToI420
cudaError_t cudaRGBAToI420( uchar4* input, uint8_t* output, size_t width, size_t height )
{
return cudaRGBAToI420( input, width * sizeof(uchar4), output, width * sizeof(uint8_t), width, height );
}
#if 0
__global__ void Gray_to_YV12(const GlobPtrSz<uint8_t> src, GlobPtr<uint8_t> dst)
{
const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
const int y = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
if (x + 1 >= src.cols || y + 1 >= src.rows)
return;
// get pointers to the data
const size_t planeSize = src.rows * dst.step;
GlobPtr<uint8_t> y_plane = globPtr(dst.data, dst.step);
GlobPtr<uint8_t> u_plane = globPtr(y_plane.data + planeSize, dst.step / 2);
GlobPtr<uint8_t> v_plane = globPtr(u_plane.data + (planeSize / 4), dst.step / 2);
uint8_t pix;
uint8_t y_val, u_val, v_val;
pix = src(y, x);
rgb_to_y(pix, pix, pix, y_val);
y_plane(y, x) = y_val;
pix = src(y, x + 1);
rgb_to_y(pix, pix, pix, y_val);
y_plane(y, x + 1) = y_val;
pix = src(y + 1, x);
rgb_to_y(pix, pix, pix, y_val);
y_plane(y + 1, x) = y_val;
pix = src(y + 1, x + 1);
rgb_to_yuv(pix, pix, pix, y_val, u_val, v_val);
y_plane(y + 1, x + 1) = y_val;
u_plane(y / 2, x / 2) = u_val;
v_plane(y / 2, x / 2) = v_val;
}
#endif
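#if 0
// Illustrative usage sketch: allocate an I420 buffer on the device and convert an
// RGBA image that is already resident in device memory. The helper name and buffer
// handling are assumptions for illustration; I420 stores a full-size Y plane plus
// quarter-size U and V planes, i.e. width * height * 3 / 2 bytes in total.
static cudaError_t exampleRGBAToI420( uchar4* d_rgba, size_t width, size_t height )
{
	uint8_t* d_i420 = NULL;
	cudaError_t err = cudaMalloc((void**)&d_i420, (width * height * 3) / 2);
	if( err != cudaSuccess )
		return err;
	err = cudaRGBAToI420(d_rgba, d_i420, width, height);
	cudaFree(d_i420);
	return err;
}
#endif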
|
955beac2d083b3e951c83001b474273168abc1d7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: mphoward
/*!
* \file mpcd/CellThermoComputeGPU.cu
* \brief Explicitly instantiates reduction operators and declares kernel drivers
* for mpcd::CellThermoComputeGPU.
*/
#include "CellThermoComputeGPU.cuh"
#include "CellThermoTypes.h"
#include "CellCommunicator.cuh"
#include "ReductionOperators.h"
#include "hoomd/WarpTools.cuh"
namespace mpcd
{
namespace gpu
{
namespace kernel
{
//! Begins the cell thermo compute by summing cell quantities on outer cells
/*!
* \param d_cell_vel Velocity and mass per cell (output)
* \param d_cell_energy Energy, temperature, number of particles per cell (output)
* \param d_cells Cell indexes to compute
* \param d_cell_np Number of particles per cell
* \param d_cell_list MPCD cell list
* \param cli Indexer into the cell list
* \param d_vel MPCD particle velocities
* \param N_mpcd Number of MPCD particles
* \param mpcd_mass Mass of MPCD particle
* \param d_embed_vel Embedded particle velocity
* \param d_embed_idx Embedded particle indexes
* \param num_cells Number of cells to compute for
*
* \tparam need_energy If true, compute the cell-level energy properties
* \tparam tpp Number of threads to use per cell
*
* \b Implementation details:
* Using \a tpp threads per cell, the cell properties are accumulated into \a d_cell_vel
* and \a d_cell_energy. Shuffle-based intrinsics are used to reduce the accumulated
* properties per-cell, and the first thread for each cell writes the result into
* global memory.
*/
template<bool need_energy, unsigned int tpp>
__global__ void begin_cell_thermo(double4 *d_cell_vel,
double3 *d_cell_energy,
const unsigned int *d_cells,
const unsigned int *d_cell_np,
const unsigned int *d_cell_list,
const Index2D cli,
const Scalar4 *d_vel,
const unsigned int N_mpcd,
const Scalar mpcd_mass,
const Scalar4 *d_embed_vel,
const unsigned int *d_embed_idx,
const unsigned int num_cells)
{
// tpp threads per cell
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= tpp * num_cells)
return;
const unsigned int cell_id = d_cells[idx / tpp];
const unsigned int np = d_cell_np[cell_id];
double4 momentum = make_double4(0.0, 0.0, 0.0, 0.0);
double ke(0.0);
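// each of the tpp threads assigned to this cell accumulates a strided subset of the
// cell's particles; the partial sums are combined by the warp reduction below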
for (unsigned int offset = (idx % tpp); offset < np; offset += tpp)
{
// Load particle data
const unsigned int cur_p = d_cell_list[cli(offset, cell_id)];
double3 vel_i;
double mass_i;
if (cur_p < N_mpcd)
{
Scalar4 vel_cell = d_vel[cur_p];
vel_i = make_double3(vel_cell.x, vel_cell.y, vel_cell.z);
mass_i = mpcd_mass;
}
else
{
Scalar4 vel_m = d_embed_vel[d_embed_idx[cur_p - N_mpcd]];
vel_i = make_double3(vel_m.x, vel_m.y, vel_m.z);
mass_i = vel_m.w;
}
// add momentum
momentum.x += mass_i * vel_i.x;
momentum.y += mass_i * vel_i.y;
momentum.z += mass_i * vel_i.z;
momentum.w += mass_i;
// also compute ke of the particle
if (need_energy)
ke += (double)(0.5) * mass_i * (vel_i.x * vel_i.x + vel_i.y * vel_i.y + vel_i.z * vel_i.z);
}
// reduce quantities down into the 0-th lane per logical warp
if (tpp > 1)
{
hoomd::detail::WarpReduce<Scalar, tpp> reducer;
momentum.x = reducer.Sum(momentum.x);
momentum.y = reducer.Sum(momentum.y);
momentum.z = reducer.Sum(momentum.z);
momentum.w = reducer.Sum(momentum.w);
if (need_energy)
ke = reducer.Sum(ke);
}
// 0-th lane in each warp writes the result
if (idx % tpp == 0)
{
d_cell_vel[cell_id] = make_double4(momentum.x, momentum.y, momentum.z, momentum.w);
if (need_energy)
d_cell_energy[cell_id] = make_double3(ke, 0.0, __int_as_double(np));
}
}
//! Finalizes the cell thermo compute by properly averaging cell quantities
/*!
* \param d_cell_vel Cell velocity and masses
* \param d_cell_energy Cell energy and temperature
* \param d_cells Cells to compute for
* \param Ncell Number of cells
* \param n_dimensions Number of dimensions in system
*
* \tparam need_energy If true, compute the cell-level energy properties.
*
* \b Implementation details:
* Using one thread per cell, the properties are averaged by mass, number of particles,
* etc. The temperature is computed from the cell kinetic energy.
*/
template<bool need_energy>
__global__ void end_cell_thermo(double4 *d_cell_vel,
double3 *d_cell_energy,
const unsigned int *d_cells,
const unsigned int Ncell,
const unsigned int n_dimensions)
{
// one thread per cell
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= Ncell)
return;
const unsigned int cell_id = d_cells[idx];
// average cell properties if the cell has mass
const double4 cell_vel = d_cell_vel[cell_id];
double3 vel_cm = make_double3(cell_vel.x, cell_vel.y, cell_vel.z);
const double mass = cell_vel.w;
if (mass > 0.)
{
// average velocity is only defined when there is some mass in the cell
vel_cm.x /= mass; vel_cm.y /= mass; vel_cm.z /= mass;
}
d_cell_vel[cell_id] = make_double4(vel_cm.x, vel_cm.y, vel_cm.z, mass);
if (need_energy)
{
const double3 cell_energy = d_cell_energy[cell_id];
const double ke = cell_energy.x;
double temp(0.0);
const unsigned int np = __double_as_int(cell_energy.z);
// temperature is only defined for 2 or more particles
if (np > 1)
{
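// subtract the center-of-mass kinetic energy and apply equipartition over the
// remaining n_dimensions * (np - 1) quadratic degrees of freedom (kB = 1), since
// the cell's center-of-mass momentum carries no thermal energy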
const double ke_cm = 0.5 * mass * (vel_cm.x*vel_cm.x + vel_cm.y*vel_cm.y + vel_cm.z*vel_cm.z);
temp = 2. * (ke - ke_cm) / (n_dimensions * (np-1));
}
d_cell_energy[cell_id] = make_double3(ke, temp, __int_as_double(np));
}
}
//! Computes the cell thermo for inner cells
/*!
* \param d_cell_vel Velocity and mass per cell (output)
* \param d_cell_energy Energy, temperature, number of particles per cell (output)
* \param ci Cell indexer
* \param inner_ci Cell indexer for the inner cells
* \param offset Offset of \a inner_ci from \a ci
* \param d_cell_np Number of particles per cell
* \param d_cell_list MPCD cell list
* \param cli Indexer into the cell list
* \param d_vel MPCD particle velocities
* \param N_mpcd Number of MPCD particles
* \param mpcd_mass Mass of MPCD particle
* \param d_embed_vel Embedded particle velocity
* \param d_embed_idx Embedded particle indexes
* \param n_dimensions System dimensionality
*
* \tparam need_energy If true, compute the cell-level energy properties.
* \tparam tpp Number of threads to use per cell
*
* \b Implementation details:
* Using \a tpp threads per cell, the cell properties are accumulated into \a d_cell_vel
* and \a d_cell_energy. Shuffle-based intrinsics are used to reduce the accumulated
* properties per-cell, and the first thread for each cell writes the result into
* global memory. The properties are properly normalized
*
* See mpcd::gpu::kernel::begin_cell_thermo for an almost identical implementation
* without the normalization at the end, which is used for the outer cells.
*/
template<bool need_energy, unsigned int tpp>
__global__ void inner_cell_thermo(double4 *d_cell_vel,
double3 *d_cell_energy,
const Index3D ci,
const Index3D inner_ci,
const uint3 offset,
const unsigned int *d_cell_np,
const unsigned int *d_cell_list,
const Index2D cli,
const Scalar4 *d_vel,
const unsigned int N_mpcd,
const Scalar mpcd_mass,
const Scalar4 *d_embed_vel,
const unsigned int *d_embed_idx,
const unsigned int n_dimensions)
{
// tpp threads per cell
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= tpp * inner_ci.getNumElements())
return;
// reinterpret the thread id as a cell by first mapping the thread into the inner indexer,
// shifting by the offset of the inner indexer from the full indexer, and then compressing
// back into a 1D cell id
const uint3 inner_cell = inner_ci.getTriple(idx/tpp);
const uint3 cell = make_uint3(inner_cell.x + offset.x, inner_cell.y + offset.y, inner_cell.z + offset.z);
const unsigned int cell_id = ci(cell.x, cell.y, cell.z);
const unsigned int np = d_cell_np[cell_id];
double4 momentum = make_double4(0.0, 0.0, 0.0, 0.0);
double ke(0.0);
for (unsigned int offset = (idx % tpp); offset < np; offset += tpp)
{
// Load particle data
const unsigned int cur_p = d_cell_list[cli(offset, cell_id)];
double3 vel_i;
double mass_i;
if (cur_p < N_mpcd)
{
Scalar4 vel_cell = d_vel[cur_p];
vel_i = make_double3(vel_cell.x, vel_cell.y, vel_cell.z);
mass_i = mpcd_mass;
}
else
{
Scalar4 vel_m = d_embed_vel[d_embed_idx[cur_p - N_mpcd]];
vel_i = make_double3(vel_m.x, vel_m.y, vel_m.z);
mass_i = vel_m.w;
}
// add momentum
momentum.x += mass_i * vel_i.x;
momentum.y += mass_i * vel_i.y;
momentum.z += mass_i * vel_i.z;
momentum.w += mass_i;
// also compute ke of the particle
if (need_energy)
ke += 0.5 * mass_i * (vel_i.x * vel_i.x + vel_i.y * vel_i.y + vel_i.z * vel_i.z);
}
// reduce quantities down into the 0-th lane per logical warp
if (tpp > 1)
{
hoomd::detail::WarpReduce<Scalar, tpp> reducer;
momentum.x = reducer.Sum(momentum.x);
momentum.y = reducer.Sum(momentum.y);
momentum.z = reducer.Sum(momentum.z);
momentum.w = reducer.Sum(momentum.w);
if (need_energy)
ke = reducer.Sum(ke);
}
// 0-th lane in each warp writes the result
if (idx % tpp == 0)
{
const double mass = momentum.w;
double3 vel_cm = make_double3(0.0,0.0,0.0);
if (mass > 0.)
{
vel_cm.x = momentum.x / mass;
vel_cm.y = momentum.y / mass;
vel_cm.z = momentum.z / mass;
}
d_cell_vel[cell_id] = make_double4(vel_cm.x, vel_cm.y, vel_cm.z, mass);
if (need_energy)
{
double temp(0.0);
if (np > 1)
{
const double ke_cm = 0.5 * mass * (vel_cm.x*vel_cm.x + vel_cm.y*vel_cm.y + vel_cm.z*vel_cm.z);
temp = 2. * (ke - ke_cm) / (n_dimensions * (np-1));
}
d_cell_energy[cell_id] = make_double3(ke, temp, __int_as_double(np));
}
}
}
/*!
* \param d_tmp_thermo Temporary cell packed thermo element
* \param d_cell_vel Cell velocity to reduce
* \param d_cell_energy Cell energy to reduce
* \param tmp_ci Temporary cell indexer for cells undergoing reduction
* \param ci Cell indexer for the regular (full) cell list
*
* \tparam need_energy If true, compute the cell-level energy properties.
*
* \b Implementation details:
* Using one thread per \a temporary cell, the cell properties are normalized
* in a way suitable for reduction of net properties, e.g. the cell velocities
* are converted to momentum. The temperature is set to the cell energy, and a
* flag is set to 1 or 0 to indicate whether this cell has an energy that should
* be used in averaging the total temperature.
*/
template<bool need_energy>
__global__ void stage_net_cell_thermo(mpcd::detail::cell_thermo_element *d_tmp_thermo,
const double4 *d_cell_vel,
const double3 *d_cell_energy,
const Index3D tmp_ci,
const Index3D ci)
{
// one thread per cell
unsigned int tmp_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (tmp_idx >= tmp_ci.getNumElements())
return;
// use the temporary cell indexer to map to a cell, then use the real cell indexer to
// get the read index
uint3 cell = tmp_ci.getTriple(tmp_idx);
const unsigned int idx = ci(cell.x, cell.y, cell.z);
const double4 vel_mass = d_cell_vel[idx];
const double3 vel = make_double3(vel_mass.x, vel_mass.y, vel_mass.z);
const double mass = vel_mass.w;
mpcd::detail::cell_thermo_element thermo;
thermo.momentum = make_double3(mass * vel.x,
mass * vel.y,
mass * vel.z);
if (need_energy)
{
const double3 cell_energy = d_cell_energy[idx];
thermo.energy = cell_energy.x;
if (__double_as_int(cell_energy.z) > 1)
{
thermo.temperature = cell_energy.y;
thermo.flag = 1;
}
else
{
thermo.temperature = 0.0;
thermo.flag = 0;
}
}
else
{
thermo.energy = 0.; thermo.temperature = 0.; thermo.flag = 0;
}
d_tmp_thermo[tmp_idx] = thermo;
}
} // end namespace kernel
//! Templated launcher for multiple threads-per-cell kernel for outer cells
/*
* \param args Common arguments to thermo kernels
* \param d_cells Cell indexes to compute
* \param num_cells Number of cells to compute for
* \param block_size Number of threads per block
* \param tpp Number of threads to use per-cell
*
* \tparam cur_tpp Number of threads-per-cell for this template instantiation
*
* Launchers are recursively instantiated at compile-time in order to match the
* correct number of threads at runtime. If the templated number of threads matches
* the runtime number of threads, then the kernel is launched. Otherwise, the
* next template (with threads reduced by a factor of 2) is launched. This
* recursion is broken by a specialized template for 0 threads, which does no
* work.
*/
template<unsigned int cur_tpp>
inline void launch_begin_cell_thermo(const mpcd::detail::thermo_args_t& args,
const unsigned int *d_cells,
const unsigned int num_cells,
const unsigned int block_size,
const unsigned int tpp)
{
if (cur_tpp == tpp)
{
if (args.need_energy)
{
static unsigned int max_block_size_energy = UINT_MAX;
if (max_block_size_energy == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::begin_cell_thermo<true,cur_tpp>);
max_block_size_energy = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size_energy);
dim3 grid(cur_tpp*num_cells / run_block_size + 1);
hipLaunchKernelGGL(( mpcd::gpu::kernel::begin_cell_thermo<true,cur_tpp>), dim3(grid), dim3(run_block_size), 0, 0, args.cell_vel,
args.cell_energy,
d_cells,
args.cell_np,
args.cell_list,
args.cli,
args.vel,
args.N_mpcd,
args.mass,
args.embed_vel,
args.embed_idx,
num_cells);
}
else
{
static unsigned int max_block_size_noenergy = UINT_MAX;
if (max_block_size_noenergy == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::begin_cell_thermo<false,cur_tpp>);
max_block_size_noenergy = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size_noenergy);
dim3 grid(cur_tpp*num_cells / run_block_size + 1);
hipLaunchKernelGGL(( mpcd::gpu::kernel::begin_cell_thermo<false,cur_tpp>), dim3(grid), dim3(run_block_size), 0, 0, args.cell_vel,
args.cell_energy,
d_cells,
args.cell_np,
args.cell_list,
args.cli,
args.vel,
args.N_mpcd,
args.mass,
args.embed_vel,
args.embed_idx,
num_cells);
}
}
else
{
launch_begin_cell_thermo<cur_tpp/2>(args,
d_cells,
num_cells,
block_size,
tpp);
}
}
//! Template specialization to break recursion
template<>
inline void launch_begin_cell_thermo<0>(const mpcd::detail::thermo_args_t& args,
const unsigned int *d_cells,
const unsigned int num_cells,
const unsigned int block_size,
const unsigned int tpp)
{ }
/*
* \param args Common arguments to thermo kernels
* \param d_cells Cell indexes to compute
* \param num_cells Number of cells to compute for
* \param block_size Number of threads per block
* \param tpp Number of threads per cell
*
* \returns hipSuccess on completion
*
* \sa mpcd::gpu::launch_begin_cell_thermo
* \sa mpcd::gpu::kernel::begin_cell_thermo
*/
hipError_t begin_cell_thermo(const mpcd::detail::thermo_args_t& args,
const unsigned int *d_cells,
const unsigned int num_cells,
const unsigned int block_size,
const unsigned int tpp)
{
if (num_cells == 0) return hipSuccess;
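// start the compile-time launcher recursion at 32 threads per cell (a full warp);
// tpp is expected to be a power of two between 1 and 32, otherwise no template
// matches and the terminating <0> specialization silently does nothing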
launch_begin_cell_thermo<32>(args,
d_cells,
num_cells,
block_size,
tpp);
return hipSuccess;
}
/*!
* \param d_cell_vel Cell velocity and masses
* \param d_cell_energy Cell energy and temperature
* \param d_cells Cells to compute for
* \param Ncell Number of cells
* \param n_dimensions Number of dimensions in system
* \param need_energy If true, compute the cell-level energy properties
*
* \returns hipSuccess on completion
*
* \sa mpcd::gpu::kernel::end_cell_thermo
*/
hipError_t end_cell_thermo(double4 *d_cell_vel,
double3 *d_cell_energy,
const unsigned int *d_cells,
const unsigned int Ncell,
const unsigned int n_dimensions,
const bool need_energy,
const unsigned int block_size)
{
if (Ncell == 0) return hipSuccess;
if (need_energy)
{
static unsigned int max_block_size_energy = UINT_MAX;
if (max_block_size_energy == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::end_cell_thermo<true>);
max_block_size_energy = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size_energy);
dim3 grid(Ncell / run_block_size + 1);
hipLaunchKernelGGL(( mpcd::gpu::kernel::end_cell_thermo<true>), dim3(grid), dim3(run_block_size), 0, 0, d_cell_vel,
d_cell_energy,
d_cells,
Ncell,
n_dimensions);
}
else
{
static unsigned int max_block_size_noenergy = UINT_MAX;
if (max_block_size_noenergy == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::end_cell_thermo<false>);
max_block_size_noenergy = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size_noenergy);
dim3 grid(Ncell / run_block_size + 1);
hipLaunchKernelGGL(( mpcd::gpu::kernel::end_cell_thermo<false>), dim3(grid), dim3(run_block_size), 0, 0, d_cell_vel,
d_cell_energy,
d_cells,
Ncell,
n_dimensions);
}
return hipSuccess;
}
//! Templated launcher for multiple threads-per-cell kernel for inner cells
/*
* \param args Common arguments to thermo kernels
* \param ci Cell indexer
* \param inner_ci Cell indexer for the inner cells
* \param offset Offset of \a inner_ci from \a ci
* \param n_dimensions System dimensionality
* \param block_size Number of threads per block
* \param tpp Number of threads per cell
*
* \tparam cur_tpp Number of threads-per-cell for this template instantiation
*
* Launchers are recursively instantiated at compile-time in order to match the
* correct number of threads at runtime. If the templated number of threads matches
* the runtime number of threads, then the kernel is launched. Otherwise, the
* next template (with threads reduced by a factor of 2) is launched. This
* recursion is broken by a specialized template for 0 threads, which does no
* work.
*/
template<unsigned int cur_tpp>
inline void launch_inner_cell_thermo(const mpcd::detail::thermo_args_t& args,
const Index3D& ci,
const Index3D& inner_ci,
const uint3& offset,
const unsigned int n_dimensions,
const unsigned int block_size,
const unsigned int tpp)
{
if (cur_tpp == tpp)
{
if (args.need_energy)
{
static unsigned int max_block_size_energy = UINT_MAX;
if (max_block_size_energy == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::inner_cell_thermo<true,cur_tpp>);
max_block_size_energy = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size_energy);
dim3 grid(cur_tpp*ci.getNumElements() / run_block_size + 1);
hipLaunchKernelGGL(( mpcd::gpu::kernel::inner_cell_thermo<true,cur_tpp>), dim3(grid), dim3(run_block_size), 0, 0, args.cell_vel,
args.cell_energy,
ci,
inner_ci,
offset,
args.cell_np,
args.cell_list,
args.cli,
args.vel,
args.N_mpcd,
args.mass,
args.embed_vel,
args.embed_idx,
n_dimensions);
}
else
{
static unsigned int max_block_size_noenergy = UINT_MAX;
if (max_block_size_noenergy == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::inner_cell_thermo<false,cur_tpp>);
max_block_size_noenergy = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size_noenergy);
dim3 grid(cur_tpp*ci.getNumElements() / run_block_size + 1);
hipLaunchKernelGGL(( mpcd::gpu::kernel::inner_cell_thermo<false,cur_tpp>), dim3(grid), dim3(run_block_size), 0, 0, args.cell_vel,
args.cell_energy,
ci,
inner_ci,
offset,
args.cell_np,
args.cell_list,
args.cli,
args.vel,
args.N_mpcd,
args.mass,
args.embed_vel,
args.embed_idx,
n_dimensions);
}
}
else
{
launch_inner_cell_thermo<cur_tpp/2>(args,
ci,
inner_ci,
offset,
n_dimensions,
block_size,
tpp);
}
}
//! Template specialization to break recursion
template<>
inline void launch_inner_cell_thermo<0>(const mpcd::detail::thermo_args_t& args,
const Index3D& ci,
const Index3D& inner_ci,
const uint3& offset,
const unsigned int n_dimensions,
const unsigned int block_size,
const unsigned int tpp)
{ }
/*!
* \param args Common arguments for cell thermo compute
* \param ci Cell indexer
* \param inner_ci Cell indexer for the inner cells
* \param offset Offset of \a inner_ci from \a ci
* \param n_dimensions System dimensionality
* \param block_size Number of threads per block
* \param tpp Number of threads per cell
*
* \returns hipSuccess on completion
*
* \sa mpcd::gpu::launch_inner_cell_thermo
* \sa mpcd::gpu::kernel::inner_cell_thermo
*/
hipError_t inner_cell_thermo(const mpcd::detail::thermo_args_t& args,
const Index3D& ci,
const Index3D& inner_ci,
const uint3& offset,
const unsigned int n_dimensions,
const unsigned int block_size,
const unsigned int tpp)
{
if (inner_ci.getNumElements() == 0) return hipSuccess;
launch_inner_cell_thermo<32>(args,
ci,
inner_ci,
offset,
n_dimensions,
block_size,
tpp);
return hipSuccess;
}
/*!
* \param d_tmp_thermo Temporary cell packed thermo element
* \param d_cell_vel Cell velocity to reduce
* \param d_cell_energy Cell energy to reduce
* \param tmp_ci Temporary cell indexer for cells undergoing reduction
* \param ci Cell indexer for the regular (full) cell list
* \param need_energy If true, compute the cell-level energy properties
* \param block_size Number of threads per block
*
* \returns hipSuccess on completion
*
* \sa mpcd::gpu::kernel::stage_net_cell_thermo
*/
hipError_t stage_net_cell_thermo(mpcd::detail::cell_thermo_element *d_tmp_thermo,
const double4 *d_cell_vel,
const double3 *d_cell_energy,
const Index3D& tmp_ci,
const Index3D& ci,
bool need_energy,
const unsigned int block_size)
{
if (need_energy)
{
static unsigned int max_block_size_energy = UINT_MAX;
if (max_block_size_energy == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::stage_net_cell_thermo<true>);
max_block_size_energy = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size_energy);
dim3 grid(tmp_ci.getNumElements() / run_block_size + 1);
hipLaunchKernelGGL(( mpcd::gpu::kernel::stage_net_cell_thermo<true>), dim3(grid), dim3(run_block_size), 0, 0, d_tmp_thermo,
d_cell_vel,
d_cell_energy,
tmp_ci,
ci);
}
else
{
static unsigned int max_block_size_noenergy = UINT_MAX;
if (max_block_size_noenergy == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::stage_net_cell_thermo<false>);
max_block_size_noenergy = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size_noenergy);
dim3 grid(tmp_ci.getNumElements() / run_block_size + 1);
hipLaunchKernelGGL(( mpcd::gpu::kernel::stage_net_cell_thermo<false>), dim3(grid), dim3(run_block_size), 0, 0, d_tmp_thermo,
d_cell_vel,
d_cell_energy,
tmp_ci,
ci);
}
return hipSuccess;
}
/*!
* \param d_reduced Cell thermo properties reduced across all cells (output on second call)
* \param d_tmp Temporary storage for reduction (output on first call)
* \param tmp_bytes Number of bytes allocated for temporary storage (output on first call)
* \param d_tmp_thermo Cell thermo properties to reduce
* \param Ncell The number of cells to reduce across
*
* \returns hipSuccess on completion
*
* \b Implementation details:
* CUB DeviceReduce is used to perform the reduction. Hence, this function requires
* two calls to perform the reduction. The first call sizes the temporary storage,
* which is returned in \a d_tmp and \a tmp_bytes. The caller must then allocate
* the required bytes, and call the function a second time. This performs the
* reduction and returns the result in \a d_reduced.
*/
hipError_t reduce_net_cell_thermo(mpcd::detail::cell_thermo_element *d_reduced,
void *d_tmp,
size_t& tmp_bytes,
const mpcd::detail::cell_thermo_element *d_tmp_thermo,
const unsigned int Ncell)
{
hipcub::DeviceReduce::Sum(d_tmp, tmp_bytes, d_tmp_thermo, d_reduced, Ncell);
return hipSuccess;
}
//! Explicit template instantiation of pack for cell velocity
template hipError_t pack_cell_buffer(typename mpcd::detail::CellVelocityPackOp::element *d_send_buf,
const double4 *d_props,
const unsigned int *d_send_idx,
const mpcd::detail::CellVelocityPackOp op,
const unsigned int num_send,
unsigned int block_size);
//! Explicit template instantiation of pack for cell energy
template hipError_t pack_cell_buffer(typename mpcd::detail::CellEnergyPackOp::element *d_send_buf,
const double3 *d_props,
const unsigned int *d_send_idx,
const mpcd::detail::CellEnergyPackOp op,
const unsigned int num_send,
unsigned int block_size);
//! Explicit template instantiation of unpack for cell velocity
template hipError_t unpack_cell_buffer(double4 *d_props,
const unsigned int *d_cells,
const unsigned int *d_recv,
const unsigned int *d_recv_begin,
const unsigned int *d_recv_end,
const typename mpcd::detail::CellVelocityPackOp::element *d_recv_buf,
const mpcd::detail::CellVelocityPackOp op,
const unsigned int num_cells,
const unsigned int block_size);
//! Explicit template instantiation of unpack for cell energy
template hipError_t unpack_cell_buffer(double3 *d_props,
const unsigned int *d_cells,
const unsigned int *d_recv,
const unsigned int *d_recv_begin,
const unsigned int *d_recv_end,
const typename mpcd::detail::CellEnergyPackOp::element *d_recv_buf,
const mpcd::detail::CellEnergyPackOp op,
const unsigned int num_cells,
const unsigned int block_size);
} // end namespace gpu
} // end namespace mpcd
| 955beac2d083b3e951c83001b474273168abc1d7.cu | // Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: mphoward
/*!
* \file mpcd/CellThermoComputeGPU.cu
* \brief Explicitly instantiates reduction operators and declares kernel drivers
* for mpcd::CellThermoComputeGPU.
*/
#include "CellThermoComputeGPU.cuh"
#include "CellThermoTypes.h"
#include "CellCommunicator.cuh"
#include "ReductionOperators.h"
#include "hoomd/WarpTools.cuh"
namespace mpcd
{
namespace gpu
{
namespace kernel
{
//! Begins the cell thermo compute by summing cell quantities on outer cells
/*!
* \param d_cell_vel Velocity and mass per cell (output)
* \param d_cell_energy Energy, temperature, number of particles per cell (output)
* \param d_cells Cell indexes to compute
* \param d_cell_np Number of particles per cell
* \param d_cell_list MPCD cell list
* \param cli Indexer into the cell list
* \param d_vel MPCD particle velocities
* \param N_mpcd Number of MPCD particles
* \param mpcd_mass Mass of MPCD particle
* \param d_embed_vel Embedded particle velocity
* \param d_embed_idx Embedded particle indexes
* \param num_cells Number of cells to compute for
*
* \tparam need_energy If true, compute the cell-level energy properties
* \tparam tpp Number of threads to use per cell
*
* \b Implementation details:
* Using \a tpp threads per cell, the cell properties are accumulated into \a d_cell_vel
* and \a d_cell_energy. Shuffle-based intrinsics are used to reduce the accumulated
* properties per-cell, and the first thread for each cell writes the result into
* global memory.
*/
template<bool need_energy, unsigned int tpp>
__global__ void begin_cell_thermo(double4 *d_cell_vel,
double3 *d_cell_energy,
const unsigned int *d_cells,
const unsigned int *d_cell_np,
const unsigned int *d_cell_list,
const Index2D cli,
const Scalar4 *d_vel,
const unsigned int N_mpcd,
const Scalar mpcd_mass,
const Scalar4 *d_embed_vel,
const unsigned int *d_embed_idx,
const unsigned int num_cells)
{
// tpp threads per cell
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= tpp * num_cells)
return;
const unsigned int cell_id = d_cells[idx / tpp];
const unsigned int np = d_cell_np[cell_id];
double4 momentum = make_double4(0.0, 0.0, 0.0, 0.0);
double ke(0.0);
for (unsigned int offset = (idx % tpp); offset < np; offset += tpp)
{
// Load particle data
const unsigned int cur_p = d_cell_list[cli(offset, cell_id)];
double3 vel_i;
double mass_i;
if (cur_p < N_mpcd)
{
Scalar4 vel_cell = d_vel[cur_p];
vel_i = make_double3(vel_cell.x, vel_cell.y, vel_cell.z);
mass_i = mpcd_mass;
}
else
{
Scalar4 vel_m = d_embed_vel[d_embed_idx[cur_p - N_mpcd]];
vel_i = make_double3(vel_m.x, vel_m.y, vel_m.z);
mass_i = vel_m.w;
}
// add momentum
momentum.x += mass_i * vel_i.x;
momentum.y += mass_i * vel_i.y;
momentum.z += mass_i * vel_i.z;
momentum.w += mass_i;
// also compute ke of the particle
if (need_energy)
ke += (double)(0.5) * mass_i * (vel_i.x * vel_i.x + vel_i.y * vel_i.y + vel_i.z * vel_i.z);
}
// reduce quantities down into the 0-th lane per logical warp
if (tpp > 1)
{
hoomd::detail::WarpReduce<Scalar, tpp> reducer;
momentum.x = reducer.Sum(momentum.x);
momentum.y = reducer.Sum(momentum.y);
momentum.z = reducer.Sum(momentum.z);
momentum.w = reducer.Sum(momentum.w);
if (need_energy)
ke = reducer.Sum(ke);
}
// 0-th lane in each warp writes the result
if (idx % tpp == 0)
{
d_cell_vel[cell_id] = make_double4(momentum.x, momentum.y, momentum.z, momentum.w);
if (need_energy)
d_cell_energy[cell_id] = make_double3(ke, 0.0, __int_as_double(np));
}
}
//! Finalizes the cell thermo compute by properly averaging cell quantities
/*!
* \param d_cell_vel Cell velocity and masses
* \param d_cell_energy Cell energy and temperature
* \param d_cells Cells to compute for
* \param Ncell Number of cells
* \param n_dimensions Number of dimensions in system
*
* \tparam need_energy If true, compute the cell-level energy properties.
*
* \b Implementation details:
* Using one thread per cell, the properties are averaged by mass, number of particles,
* etc. The temperature is computed from the cell kinetic energy.
*/
template<bool need_energy>
__global__ void end_cell_thermo(double4 *d_cell_vel,
double3 *d_cell_energy,
const unsigned int *d_cells,
const unsigned int Ncell,
const unsigned int n_dimensions)
{
// one thread per cell
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= Ncell)
return;
const unsigned int cell_id = d_cells[idx];
// average cell properties if the cell has mass
const double4 cell_vel = d_cell_vel[cell_id];
double3 vel_cm = make_double3(cell_vel.x, cell_vel.y, cell_vel.z);
const double mass = cell_vel.w;
if (mass > 0.)
{
// average velocity is only defined when there is some mass in the cell
vel_cm.x /= mass; vel_cm.y /= mass; vel_cm.z /= mass;
}
d_cell_vel[cell_id] = make_double4(vel_cm.x, vel_cm.y, vel_cm.z, mass);
if (need_energy)
{
const double3 cell_energy = d_cell_energy[cell_id];
const double ke = cell_energy.x;
double temp(0.0);
const unsigned int np = __double_as_int(cell_energy.z);
// temperature is only defined for 2 or more particles
if (np > 1)
{
const double ke_cm = 0.5 * mass * (vel_cm.x*vel_cm.x + vel_cm.y*vel_cm.y + vel_cm.z*vel_cm.z);
temp = 2. * (ke - ke_cm) / (n_dimensions * (np-1));
}
d_cell_energy[cell_id] = make_double3(ke, temp, __int_as_double(np));
}
}
//! Computes the cell thermo for inner cells
/*!
* \param d_cell_vel Velocity and mass per cell (output)
* \param d_cell_energy Energy, temperature, number of particles per cell (output)
* \param ci Cell indexer
* \param inner_ci Cell indexer for the inner cells
* \param offset Offset of \a inner_ci from \a ci
* \param d_cell_np Number of particles per cell
* \param d_cell_list MPCD cell list
* \param cli Indexer into the cell list
* \param d_vel MPCD particle velocities
* \param N_mpcd Number of MPCD particles
* \param mpcd_mass Mass of MPCD particle
* \param d_embed_vel Embedded particle velocity
* \param d_embed_idx Embedded particle indexes
* \param n_dimensions System dimensionality
*
* \tparam need_energy If true, compute the cell-level energy properties.
* \tparam tpp Number of threads to use per cell
*
* \b Implementation details:
* Using \a tpp threads per cell, the cell properties are accumulated into \a d_cell_vel
* and \a d_cell_energy. Shuffle-based intrinsics are used to reduce the accumulated
* properties per-cell, and the first thread for each cell writes the result into
* global memory. The properties are properly normalized
*
* See mpcd::gpu::kernel::begin_cell_thermo for an almost identical implementation
* without the normalization at the end, which is used for the outer cells.
*/
template<bool need_energy, unsigned int tpp>
__global__ void inner_cell_thermo(double4 *d_cell_vel,
double3 *d_cell_energy,
const Index3D ci,
const Index3D inner_ci,
const uint3 offset,
const unsigned int *d_cell_np,
const unsigned int *d_cell_list,
const Index2D cli,
const Scalar4 *d_vel,
const unsigned int N_mpcd,
const Scalar mpcd_mass,
const Scalar4 *d_embed_vel,
const unsigned int *d_embed_idx,
const unsigned int n_dimensions)
{
// tpp threads per cell
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= tpp * inner_ci.getNumElements())
return;
// reinterpret the thread id as a cell by first mapping the thread into the inner indexer,
// shifting by the offset of the inner indexer from the full indexer, and then compressing
// back into a 1D cell id
const uint3 inner_cell = inner_ci.getTriple(idx/tpp);
const uint3 cell = make_uint3(inner_cell.x + offset.x, inner_cell.y + offset.y, inner_cell.z + offset.z);
const unsigned int cell_id = ci(cell.x, cell.y, cell.z);
const unsigned int np = d_cell_np[cell_id];
double4 momentum = make_double4(0.0, 0.0, 0.0, 0.0);
double ke(0.0);
for (unsigned int offset = (idx % tpp); offset < np; offset += tpp)
{
// Load particle data
const unsigned int cur_p = d_cell_list[cli(offset, cell_id)];
double3 vel_i;
double mass_i;
if (cur_p < N_mpcd)
{
Scalar4 vel_cell = d_vel[cur_p];
vel_i = make_double3(vel_cell.x, vel_cell.y, vel_cell.z);
mass_i = mpcd_mass;
}
else
{
Scalar4 vel_m = d_embed_vel[d_embed_idx[cur_p - N_mpcd]];
vel_i = make_double3(vel_m.x, vel_m.y, vel_m.z);
mass_i = vel_m.w;
}
// add momentum
momentum.x += mass_i * vel_i.x;
momentum.y += mass_i * vel_i.y;
momentum.z += mass_i * vel_i.z;
momentum.w += mass_i;
// also compute ke of the particle
if (need_energy)
ke += 0.5 * mass_i * (vel_i.x * vel_i.x + vel_i.y * vel_i.y + vel_i.z * vel_i.z);
}
// reduce quantities down into the 0-th lane per logical warp
if (tpp > 1)
{
hoomd::detail::WarpReduce<Scalar, tpp> reducer;
momentum.x = reducer.Sum(momentum.x);
momentum.y = reducer.Sum(momentum.y);
momentum.z = reducer.Sum(momentum.z);
momentum.w = reducer.Sum(momentum.w);
if (need_energy)
ke = reducer.Sum(ke);
}
// 0-th lane in each warp writes the result
if (idx % tpp == 0)
{
const double mass = momentum.w;
double3 vel_cm = make_double3(0.0,0.0,0.0);
if (mass > 0.)
{
vel_cm.x = momentum.x / mass;
vel_cm.y = momentum.y / mass;
vel_cm.z = momentum.z / mass;
}
d_cell_vel[cell_id] = make_double4(vel_cm.x, vel_cm.y, vel_cm.z, mass);
if (need_energy)
{
double temp(0.0);
if (np > 1)
{
const double ke_cm = 0.5 * mass * (vel_cm.x*vel_cm.x + vel_cm.y*vel_cm.y + vel_cm.z*vel_cm.z);
temp = 2. * (ke - ke_cm) / (n_dimensions * (np-1));
}
d_cell_energy[cell_id] = make_double3(ke, temp, __int_as_double(np));
}
}
}
/*!
* \param d_tmp_thermo Temporary cell packed thermo element
* \param d_cell_vel Cell velocity to reduce
* \param d_cell_energy Cell energy to reduce
* \param tmp_ci Temporary cell indexer for cells undergoing reduction
* \param ci Cell indexer for the regular (full) cell list
*
* \tparam need_energy If true, compute the cell-level energy properties.
*
* \b Implementation details:
* Using one thread per \a temporary cell, the cell properties are normalized
* in a way suitable for reduction of net properties, e.g. the cell velocities
* are converted to momentum. The temperature is set to the cell energy, and a
* flag is set to 1 or 0 to indicate whether this cell has an energy that should
* be used in averaging the total temperature.
*/
template<bool need_energy>
__global__ void stage_net_cell_thermo(mpcd::detail::cell_thermo_element *d_tmp_thermo,
const double4 *d_cell_vel,
const double3 *d_cell_energy,
const Index3D tmp_ci,
const Index3D ci)
{
// one thread per cell
unsigned int tmp_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (tmp_idx >= tmp_ci.getNumElements())
return;
// use the temporary cell indexer to map to a cell, then use the real cell indexer to
// get the read index
uint3 cell = tmp_ci.getTriple(tmp_idx);
const unsigned int idx = ci(cell.x, cell.y, cell.z);
const double4 vel_mass = d_cell_vel[idx];
const double3 vel = make_double3(vel_mass.x, vel_mass.y, vel_mass.z);
const double mass = vel_mass.w;
mpcd::detail::cell_thermo_element thermo;
thermo.momentum = make_double3(mass * vel.x,
mass * vel.y,
mass * vel.z);
if (need_energy)
{
const double3 cell_energy = d_cell_energy[idx];
thermo.energy = cell_energy.x;
if (__double_as_int(cell_energy.z) > 1)
{
thermo.temperature = cell_energy.y;
thermo.flag = 1;
}
else
{
thermo.temperature = 0.0;
thermo.flag = 0;
}
}
else
{
thermo.energy = 0.; thermo.temperature = 0.; thermo.flag = 0;
}
d_tmp_thermo[tmp_idx] = thermo;
}
} // end namespace kernel
//! Templated launcher for multiple threads-per-cell kernel for outer cells
/*
* \param args Common arguments to thermo kernels
* \param d_cells Cell indexes to compute
* \param num_cells Number of cells to compute for
* \param block_size Number of threads per block
* \param tpp Number of threads to use per-cell
*
* \tparam cur_tpp Number of threads-per-cell for this template instantiation
*
* Launchers are recursively instantiated at compile-time in order to match the
* correct number of threads at runtime. If the templated number of threads matches
* the runtime number of threads, then the kernel is launched. Otherwise, the
* next template (with threads reduced by a factor of 2) is launched. This
* recursion is broken by a specialized template for 0 threads, which does no
* work.
*/
template<unsigned int cur_tpp>
inline void launch_begin_cell_thermo(const mpcd::detail::thermo_args_t& args,
const unsigned int *d_cells,
const unsigned int num_cells,
const unsigned int block_size,
const unsigned int tpp)
{
if (cur_tpp == tpp)
{
if (args.need_energy)
{
static unsigned int max_block_size_energy = UINT_MAX;
if (max_block_size_energy == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::begin_cell_thermo<true,cur_tpp>);
max_block_size_energy = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size_energy);
dim3 grid(cur_tpp*num_cells / run_block_size + 1);
mpcd::gpu::kernel::begin_cell_thermo<true,cur_tpp><<<grid, run_block_size>>>(args.cell_vel,
args.cell_energy,
d_cells,
args.cell_np,
args.cell_list,
args.cli,
args.vel,
args.N_mpcd,
args.mass,
args.embed_vel,
args.embed_idx,
num_cells);
}
else
{
static unsigned int max_block_size_noenergy = UINT_MAX;
if (max_block_size_noenergy == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::begin_cell_thermo<false,cur_tpp>);
max_block_size_noenergy = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size_noenergy);
dim3 grid(cur_tpp*num_cells / run_block_size + 1);
mpcd::gpu::kernel::begin_cell_thermo<false,cur_tpp><<<grid, run_block_size>>>(args.cell_vel,
args.cell_energy,
d_cells,
args.cell_np,
args.cell_list,
args.cli,
args.vel,
args.N_mpcd,
args.mass,
args.embed_vel,
args.embed_idx,
num_cells);
}
}
else
{
launch_begin_cell_thermo<cur_tpp/2>(args,
d_cells,
num_cells,
block_size,
tpp);
}
}
//! Template specialization to break recursion
template<>
inline void launch_begin_cell_thermo<0>(const mpcd::detail::thermo_args_t& args,
const unsigned int *d_cells,
const unsigned int num_cells,
const unsigned int block_size,
const unsigned int tpp)
{ }
/*
* \param args Common arguments to thermo kernels
* \param d_cells Cell indexes to compute
* \param num_cells Number of cells to compute for
* \param block_size Number of threads per block
* \param tpp Number of threads per cell
*
* \returns cudaSuccess on completion
*
* \sa mpcd::gpu::launch_begin_cell_thermo
* \sa mpcd::gpu::kernel::begin_cell_thermo
*/
cudaError_t begin_cell_thermo(const mpcd::detail::thermo_args_t& args,
const unsigned int *d_cells,
const unsigned int num_cells,
const unsigned int block_size,
const unsigned int tpp)
{
if (num_cells == 0) return cudaSuccess;
launch_begin_cell_thermo<32>(args,
d_cells,
num_cells,
block_size,
tpp);
return cudaSuccess;
}
/*!
* \param d_cell_vel Cell velocity and masses
* \param d_cell_energy Cell energy and temperature
* \param d_cells Cells to compute for
* \param Ncell Number of cells
* \param n_dimensions Number of dimensions in system
* \param need_energy If true, compute the cell-level energy properties
*
* \returns cudaSuccess on completion
*
* \sa mpcd::gpu::kernel::end_cell_thermo
*/
cudaError_t end_cell_thermo(double4 *d_cell_vel,
double3 *d_cell_energy,
const unsigned int *d_cells,
const unsigned int Ncell,
const unsigned int n_dimensions,
const bool need_energy,
const unsigned int block_size)
{
if (Ncell == 0) return cudaSuccess;
if (need_energy)
{
static unsigned int max_block_size_energy = UINT_MAX;
if (max_block_size_energy == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::end_cell_thermo<true>);
max_block_size_energy = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size_energy);
dim3 grid(Ncell / run_block_size + 1);
mpcd::gpu::kernel::end_cell_thermo<true><<<grid, run_block_size>>>(d_cell_vel,
d_cell_energy,
d_cells,
Ncell,
n_dimensions);
}
else
{
static unsigned int max_block_size_noenergy = UINT_MAX;
if (max_block_size_noenergy == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::end_cell_thermo<false>);
max_block_size_noenergy = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size_noenergy);
dim3 grid(Ncell / run_block_size + 1);
mpcd::gpu::kernel::end_cell_thermo<false><<<grid, run_block_size>>>(d_cell_vel,
d_cell_energy,
d_cells,
Ncell,
n_dimensions);
}
return cudaSuccess;
}
//! Templated launcher for multiple threads-per-cell kernel for inner cells
/*
* \param args Common arguments to thermo kernels
* \param ci Cell indexer
* \param inner_ci Cell indexer for the inner cells
* \param offset Offset of \a inner_ci from \a ci
* \param n_dimensions System dimensionality
* \param block_size Number of threads per block
* \param tpp Number of threads per cell
*
* \tparam cur_tpp Number of threads-per-cell for this template instantiation
*
* Launchers are recursively instantiated at compile-time in order to match the
* correct number of threads at runtime. If the templated number of threads matches
* the runtime number of threads, then the kernel is launched. Otherwise, the
* next template (with threads reduced by a factor of 2) is launched. This
* recursion is broken by a specialized template for 0 threads, which does no
* work.
*/
template<unsigned int cur_tpp>
inline void launch_inner_cell_thermo(const mpcd::detail::thermo_args_t& args,
const Index3D& ci,
const Index3D& inner_ci,
const uint3& offset,
const unsigned int n_dimensions,
const unsigned int block_size,
const unsigned int tpp)
{
if (cur_tpp == tpp)
{
if (args.need_energy)
{
static unsigned int max_block_size_energy = UINT_MAX;
if (max_block_size_energy == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::inner_cell_thermo<true,cur_tpp>);
max_block_size_energy = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size_energy);
dim3 grid(cur_tpp*ci.getNumElements() / run_block_size + 1);
mpcd::gpu::kernel::inner_cell_thermo<true,cur_tpp><<<grid, run_block_size>>>(args.cell_vel,
args.cell_energy,
ci,
inner_ci,
offset,
args.cell_np,
args.cell_list,
args.cli,
args.vel,
args.N_mpcd,
args.mass,
args.embed_vel,
args.embed_idx,
n_dimensions);
}
else
{
static unsigned int max_block_size_noenergy = UINT_MAX;
if (max_block_size_noenergy == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::inner_cell_thermo<false,cur_tpp>);
max_block_size_noenergy = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size_noenergy);
dim3 grid(cur_tpp*ci.getNumElements() / run_block_size + 1);
mpcd::gpu::kernel::inner_cell_thermo<false,cur_tpp><<<grid, run_block_size>>>(args.cell_vel,
args.cell_energy,
ci,
inner_ci,
offset,
args.cell_np,
args.cell_list,
args.cli,
args.vel,
args.N_mpcd,
args.mass,
args.embed_vel,
args.embed_idx,
n_dimensions);
}
}
else
{
launch_inner_cell_thermo<cur_tpp/2>(args,
ci,
inner_ci,
offset,
n_dimensions,
block_size,
tpp);
}
}
//! Template specialization to break recursion
template<>
inline void launch_inner_cell_thermo<0>(const mpcd::detail::thermo_args_t& args,
const Index3D& ci,
const Index3D& inner_ci,
const uint3& offset,
const unsigned int n_dimensions,
const unsigned int block_size,
const unsigned int tpp)
{ }
/*!
* \param args Common arguments for cell thermo compute
* \param ci Cell indexer
* \param inner_ci Cell indexer for the inner cells
* \param offset Offset of \a inner_ci from \a ci
* \param n_dimensions System dimensionality
* \param block_size Number of threads per block
* \param tpp Number of threads per cell
*
* \returns cudaSuccess on completion
*
* \sa mpcd::gpu::launch_inner_cell_thermo
* \sa mpcd::gpu::kernel::inner_cell_thermo
*/
cudaError_t inner_cell_thermo(const mpcd::detail::thermo_args_t& args,
const Index3D& ci,
const Index3D& inner_ci,
const uint3& offset,
const unsigned int n_dimensions,
const unsigned int block_size,
const unsigned int tpp)
{
if (inner_ci.getNumElements() == 0) return cudaSuccess;
launch_inner_cell_thermo<32>(args,
ci,
inner_ci,
offset,
n_dimensions,
block_size,
tpp);
return cudaSuccess;
}
/*!
* \param d_tmp_thermo Temporary cell packed thermo element
* \param d_cell_vel Cell velocity to reduce
* \param d_cell_energy Cell energy to reduce
* \param tmp_ci Temporary cell indexer for cells undergoing reduction
* \param ci Cell indexer for the regular (full) cell list
* \param need_energy If true, compute the cell-level energy properties
* \param block_size Number of threads per block
*
* \returns cudaSuccess on completion
*
* \sa mpcd::gpu::kernel::stage_net_cell_thermo
*/
cudaError_t stage_net_cell_thermo(mpcd::detail::cell_thermo_element *d_tmp_thermo,
const double4 *d_cell_vel,
const double3 *d_cell_energy,
const Index3D& tmp_ci,
const Index3D& ci,
bool need_energy,
const unsigned int block_size)
{
if (need_energy)
{
static unsigned int max_block_size_energy = UINT_MAX;
if (max_block_size_energy == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::stage_net_cell_thermo<true>);
max_block_size_energy = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size_energy);
dim3 grid(tmp_ci.getNumElements() / run_block_size + 1);
mpcd::gpu::kernel::stage_net_cell_thermo<true><<<grid, run_block_size>>>(d_tmp_thermo,
d_cell_vel,
d_cell_energy,
tmp_ci,
ci);
}
else
{
static unsigned int max_block_size_noenergy = UINT_MAX;
if (max_block_size_noenergy == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::stage_net_cell_thermo<false>);
max_block_size_noenergy = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size_noenergy);
dim3 grid(tmp_ci.getNumElements() / run_block_size + 1);
mpcd::gpu::kernel::stage_net_cell_thermo<false><<<grid, run_block_size>>>(d_tmp_thermo,
d_cell_vel,
d_cell_energy,
tmp_ci,
ci);
}
return cudaSuccess;
}
/*!
* \param d_reduced Cell thermo properties reduced across all cells (output on second call)
* \param d_tmp Temporary storage for reduction (output on first call)
* \param tmp_bytes Number of bytes allocated for temporary storage (output on first call)
* \param d_tmp_thermo Cell thermo properties to reduce
* \param Ncell The number of cells to reduce across
*
* \returns cudaSuccess on completion
*
* \b Implementation details:
* CUB DeviceReduce is used to perform the reduction. Hence, this function requires
* two calls to perform the reduction. The first call sizes the temporary storage,
* which is returned in \a d_tmp and \a tmp_bytes. The caller must then allocate
* the required bytes, and call the function a second time. This performs the
* reduction and returns the result in \a d_reduced.
*/
cudaError_t reduce_net_cell_thermo(mpcd::detail::cell_thermo_element *d_reduced,
void *d_tmp,
size_t& tmp_bytes,
const mpcd::detail::cell_thermo_element *d_tmp_thermo,
const unsigned int Ncell)
{
cub::DeviceReduce::Sum(d_tmp, tmp_bytes, d_tmp_thermo, d_reduced, Ncell);
return cudaSuccess;
}
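#if 0
// Illustrative two-call usage sketch for reduce_net_cell_thermo; the variable names
// are assumptions. The first call passes d_tmp = NULL so CUB only reports the
// required temporary storage size, the second call performs the actual reduction.
{
    size_t tmp_bytes = 0;
    mpcd::gpu::reduce_net_cell_thermo(d_reduced, NULL, tmp_bytes, d_tmp_thermo, Ncell);
    void* d_tmp = NULL;
    cudaMalloc(&d_tmp, tmp_bytes);
    mpcd::gpu::reduce_net_cell_thermo(d_reduced, d_tmp, tmp_bytes, d_tmp_thermo, Ncell);
    cudaFree(d_tmp);
}
#endif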
//! Explicit template instantiation of pack for cell velocity
template cudaError_t pack_cell_buffer(typename mpcd::detail::CellVelocityPackOp::element *d_send_buf,
const double4 *d_props,
const unsigned int *d_send_idx,
const mpcd::detail::CellVelocityPackOp op,
const unsigned int num_send,
unsigned int block_size);
//! Explicit template instantiation of pack for cell energy
template cudaError_t pack_cell_buffer(typename mpcd::detail::CellEnergyPackOp::element *d_send_buf,
const double3 *d_props,
const unsigned int *d_send_idx,
const mpcd::detail::CellEnergyPackOp op,
const unsigned int num_send,
unsigned int block_size);
//! Explicit template instantiation of unpack for cell velocity
template cudaError_t unpack_cell_buffer(double4 *d_props,
const unsigned int *d_cells,
const unsigned int *d_recv,
const unsigned int *d_recv_begin,
const unsigned int *d_recv_end,
const typename mpcd::detail::CellVelocityPackOp::element *d_recv_buf,
const mpcd::detail::CellVelocityPackOp op,
const unsigned int num_cells,
const unsigned int block_size);
//! Explicit template instantiation of unpack for cell energy
template cudaError_t unpack_cell_buffer(double3 *d_props,
const unsigned int *d_cells,
const unsigned int *d_recv,
const unsigned int *d_recv_begin,
const unsigned int *d_recv_end,
const typename mpcd::detail::CellEnergyPackOp::element *d_recv_buf,
const mpcd::detail::CellEnergyPackOp op,
const unsigned int num_cells,
const unsigned int block_size);
} // end namespace gpu
} // end namespace mpcd
|
d0272ae1cd5f64828cd488386281accdf3cd2e0d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// include files
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include "../common/helper_cuda.h"
//
// kernel routine
//
__global__ void my_first_kernel(float *x)
{
int tid = threadIdx.x + blockDim.x*blockIdx.x;
x[tid] = (float) threadIdx.x;
}
//
// main code
//
int main(int argc, const char **argv)
{
float *x;
int nblocks, nthreads, nsize, n;
// initialise card
findCudaDevice(argc, argv);
// set number of blocks, and threads per block
nblocks = 2;
nthreads = 8;
nsize = nblocks*nthreads ;
// allocate memory for array
checkCudaErrors(hipMallocManaged(&x, nsize*sizeof(float)));
// execute kernel
hipLaunchKernelGGL(( my_first_kernel), dim3(nblocks),dim3(nthreads), 0, 0, x);
getLastCudaError("my_first_kernel execution failed\n");
// synchronize to wait for kernel to finish, and data copied back
hipDeviceSynchronize();
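// with 2 blocks of 8 threads each element holds its thread index within its block,
// so the loop below prints 0..7 followed by 0..7 again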
for (n=0; n<nsize; n++) printf(" n, x = %d %f \n",n,x[n]);
// free memory
checkCudaErrors(hipFree(x));
// CUDA exit -- needed to flush printf write buffer
hipDeviceReset();
return 0;
}
| d0272ae1cd5f64828cd488386281accdf3cd2e0d.cu | //
// include files
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include "../common/helper_cuda.h"
//
// kernel routine
//
__global__ void my_first_kernel(float *x)
{
int tid = threadIdx.x + blockDim.x*blockIdx.x;
x[tid] = (float) threadIdx.x;
}
//
// main code
//
int main(int argc, const char **argv)
{
float *x;
int nblocks, nthreads, nsize, n;
// initialise card
findCudaDevice(argc, argv);
// set number of blocks, and threads per block
nblocks = 2;
nthreads = 8;
nsize = nblocks*nthreads ;
// allocate memory for array
checkCudaErrors(cudaMallocManaged(&x, nsize*sizeof(float)));
// execute kernel
my_first_kernel<<<nblocks,nthreads>>>(x);
getLastCudaError("my_first_kernel execution failed\n");
// synchronize to wait for kernel to finish, and data copied back
cudaDeviceSynchronize();
for (n=0; n<nsize; n++) printf(" n, x = %d %f \n",n,x[n]);
// free memory
checkCudaErrors(cudaFree(x));
// CUDA exit -- needed to flush printf write buffer
cudaDeviceReset();
return 0;
}
|
89b9516e791356b833b99f70a3fb76357b32d858.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <string>
#include <math.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include "cta_config.h"
#include "../common/cuda_check.h"
extern __global__ void maxPool(
float* input, float* output, int num_output_rows, int num_output_cols);
void RandFloatArray(float* ptr, int length) {
for (int i = 0; i < length; ++i) {
float val = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
ptr[i] = val;
}
return;
}
void AssertArrayEqual(float* ptr1, float* ptr2, int length, float precision = 1e-5) {
for (int i = 0; i < length; ++i) {
assert(fabs(ptr1[i] - ptr2[i]) < precision);
}
return;
}
int main(int argc, char** argv) {
if (argc < 3) {
printf("Usag ./maxpool <num of output rows> <num of output columns>");
return -1;
}
int num_output_rows = atoi(argv[1]);
int num_output_cols = atoi(argv[2]);
printf("Running the max pooling for an output matrix %d x %d\n",
num_output_rows, num_output_cols);
int num_input_rows = num_output_rows * 2;
int num_input_cols = num_output_cols * 2;
float* host_input = (float*) malloc(
num_input_rows * num_input_cols * sizeof(float));
float* tmp_input = (float*) malloc(
num_input_rows * num_input_cols * sizeof(float));
float* host_output = (float*) malloc(
num_output_rows * num_output_cols * sizeof(float));
RandFloatArray(tmp_input, num_input_rows * num_input_cols);
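// repack the row-major tmp_input into the blocked layout consumed by the maxPool
// kernel: every 2 x (2*NUM_THREADS) input tile is split into four NUM_THREADS-wide
// sub-rows that land in four separate planes of size
// num_output_rows x num_output_cols (plane index kx*2 + ky)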
for (int i = 0; i < num_input_rows; i += 2) {
for (int j = 0; j < num_input_cols; j += (NUM_THREADS * 2)) {
for (int kx = 0; kx < 2; ++kx) {
for (int ky = 0; ky < 2; ++ky) {
for (int k = 0; k < NUM_THREADS; ++k) {
host_input[(kx * 2 + ky) * num_output_rows * num_output_cols
+ (i / 2) * num_output_cols + (j / 2) + k] = tmp_input[
(i + kx) * num_input_cols + j + ky * NUM_THREADS + k];
}
}
}
}
}
for (int i = 0; i < num_output_rows; ++i) {
for (int j = 0; j < num_output_cols; ++j) {
float max_value = 0.0f;
for (int kx = 0; kx < 2; kx++) {
for (int ky = 0; ky < 2; ky++) {
float curr_value = tmp_input[
(i * 2 + kx) * num_input_cols + (j * 2 + ky)];
if (curr_value > max_value) {
max_value = curr_value;
}
}
}
host_output[i * num_output_cols + j] = max_value;
}
}
printf("Completed ground truth computation!\n");
float* device_input;
float* device_output;
CUDA_CHECK(hipMalloc((void**) &device_input,
num_input_rows * num_input_cols * sizeof(float)));
CUDA_CHECK(hipMalloc((void**) &device_output,
num_output_rows * num_output_cols * sizeof(float)));
float* results = (float*) malloc(
num_output_rows * num_output_cols * sizeof(float));
CUDA_CHECK(hipMemcpy(device_input, host_input,
num_input_rows * num_input_cols * sizeof(float),
hipMemcpyHostToDevice));
#ifdef MEASURE_POWER
while (true) {
#endif
hipLaunchKernelGGL(( maxPool), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, device_input, device_output,
num_output_rows, num_output_cols);
hipDeviceSynchronize();
#ifdef MEASURE_POWER
}
#endif
printf("Completed GPU computation!\n");
CUDA_CHECK(hipMemcpy(results, device_output,
num_output_rows * num_output_cols * sizeof(float),
hipMemcpyDeviceToHost));
AssertArrayEqual(host_output, results, num_output_rows * num_output_cols);
printf("Correctness Check: Accepted!\n");
free(host_input);
free(tmp_input);
free(host_output);
free(results);
CUDA_CHECK(hipFree(device_input));
CUDA_CHECK(hipFree(device_output));
return 0;
}
| 89b9516e791356b833b99f70a3fb76357b32d858.cu | #include <stdlib.h>
#include <stdio.h>
#include <string>
#include <math.h>
#include <assert.h>
#include <cuda.h>
#include "cta_config.h"
#include "../common/cuda_check.h"
extern __global__ void maxPool(
float* input, float* output, int num_output_rows, int num_output_cols);
void RandFloatArray(float* ptr, int length) {
for (int i = 0; i < length; ++i) {
float val = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
ptr[i] = val;
}
return;
}
void AssertArrayEqual(float* ptr1, float* ptr2, int length, float precision = 1e-5) {
for (int i = 0; i < length; ++i) {
assert(fabs(ptr1[i] - ptr2[i]) < precision);
}
return;
}
int main(int argc, char** argv) {
if (argc < 3) {
printf("Usag ./maxpool <num of output rows> <num of output columns>");
return -1;
}
int num_output_rows = atoi(argv[1]);
int num_output_cols = atoi(argv[2]);
printf("Running the max pooling for an output matrix %d x %d\n",
num_output_rows, num_output_cols);
int num_input_rows = num_output_rows * 2;
int num_input_cols = num_output_cols * 2;
float* host_input = (float*) malloc(
num_input_rows * num_input_cols * sizeof(float));
float* tmp_input = (float*) malloc(
num_input_rows * num_input_cols * sizeof(float));
float* host_output = (float*) malloc(
num_output_rows * num_output_cols * sizeof(float));
RandFloatArray(tmp_input, num_input_rows * num_input_cols);
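// Repack the row-major random input into the blocked layout that the external maxPool
// kernel presumably expects: four planes selected by (kx, ky), with columns grouped in
// NUM_THREADS-wide strips. This assumes num_input_cols is a multiple of 2 * NUM_THREADS.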
for (int i = 0; i < num_input_rows; i += 2) {
for (int j = 0; j < num_input_cols; j += (NUM_THREADS * 2)) {
for (int kx = 0; kx < 2; ++kx) {
for (int ky = 0; ky < 2; ++ky) {
for (int k = 0; k < NUM_THREADS; ++k) {
host_input[(kx * 2 + ky) * num_output_rows * num_output_cols
+ (i / 2) * num_output_cols + (j / 2) + k] = tmp_input[
(i + kx) * num_input_cols + j + ky * NUM_THREADS + k];
}
}
}
}
}
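// CPU reference: plain 2x2 max pooling over the original row-major tmp_input,
// used below to validate the GPU result.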
for (int i = 0; i < num_output_rows; ++i) {
for (int j = 0; j < num_output_cols; ++j) {
float max_value = 0.0f;
for (int kx = 0; kx < 2; kx++) {
for (int ky = 0; ky < 2; ky++) {
float curr_value = tmp_input[
(i * 2 + kx) * num_input_cols + (j * 2 + ky)];
if (curr_value > max_value) {
max_value = curr_value;
}
}
}
host_output[i * num_output_cols + j] = max_value;
}
}
printf("Completed ground truth computation!\n");
float* device_input;
float* device_output;
CUDA_CHECK(cudaMalloc((void**) &device_input,
num_input_rows * num_input_cols * sizeof(float)));
CUDA_CHECK(cudaMalloc((void**) &device_output,
num_output_rows * num_output_cols * sizeof(float)));
float* results = (float*) malloc(
num_output_rows * num_output_cols * sizeof(float));
CUDA_CHECK(cudaMemcpy(device_input, host_input,
num_input_rows * num_input_cols * sizeof(float),
cudaMemcpyHostToDevice));
#ifdef MEASURE_POWER
while (true) {
#endif
maxPool<<<NUM_BLOCKS, NUM_THREADS>>>(device_input, device_output,
num_output_rows, num_output_cols);
cudaDeviceSynchronize();
#ifdef MEASURE_POWER
}
#endif
printf("Completed GPU computation!\n");
CUDA_CHECK(cudaMemcpy(results, device_output,
num_output_rows * num_output_cols * sizeof(float),
cudaMemcpyDeviceToHost));
AssertArrayEqual(host_output, results, num_output_rows * num_output_cols);
printf("Correctness Check: Accepted!\n");
free(host_input);
free(tmp_input);
free(host_output);
free(results);
CUDA_CHECK(cudaFree(device_input));
CUDA_CHECK(cudaFree(device_output));
return 0;
}
|
841f7a6e4d37885355d847b78872b98efaf16233.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2017 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* Matrix multiplication: C = A * B.
* Device code.
*/
#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_
#include <stdio.h>
#define AS(i, j) As[i][j]
#define BS(i, j) Bs[i][j]
////////////////////////////////////////////////////////////////////////////////
//! Matrix multiplication on the device: C = A * B
//! wA is A's width and wB is B's width
////////////////////////////////////////////////////////////////////////////////
template <int block_size, typename size_type>
__device__ void matrixMul(float *C, float *A, float *B, size_type wA,
size_type wB) {
// Block index
size_type bx = blockIdx.x;
size_type by = blockIdx.y;
// Thread index
size_type tx = threadIdx.x;
size_type ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
size_type aBegin = wA * block_size * by;
// Index of the last sub-matrix of A processed by the block
size_type aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
size_type aStep = block_size;
// Index of the first sub-matrix of B processed by the block
size_type bBegin = block_size * bx;
// Step size used to iterate through the sub-matrices of B
size_type bStep = block_size * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (size_type a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[block_size][block_size];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[block_size][block_size];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
AS(ty, tx) = A[a + wA * ty + tx];
BS(ty, tx) = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (size_type k = 0; k < block_size; ++k) Csub += AS(ty, k) * BS(k, tx);
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
size_type c = wB * block_size * by + block_size * bx;
C[c + wB * ty + tx] = Csub;
}
// C wrappers around our template kernel
extern "C" __global__ void matrixMul_bs8_64bit(float *C, float *A, float *B,
size_t wA, size_t wB) {
matrixMul<8, size_t>(C, A, B, wA, wB);
}
extern "C" __global__ void matrixMul_bs16_64bit(float *C, float *A, float *B,
size_t wA, size_t wB) {
matrixMul<16, size_t>(C, A, B, wA, wB);
}
extern "C" __global__ void matrixMul_bs32_64bit(float *C, float *A, float *B,
size_t wA, size_t wB) {
matrixMul<32, size_t>(C, A, B, wA, wB);
}
#endif // #ifndef _MATRIXMUL_KERNEL_H_
| 841f7a6e4d37885355d847b78872b98efaf16233.cu | /*
* Copyright 1993-2017 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* Matrix multiplication: C = A * B.
* Device code.
*/
#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_
#include <stdio.h>
#define AS(i, j) As[i][j]
#define BS(i, j) Bs[i][j]
////////////////////////////////////////////////////////////////////////////////
//! Matrix multiplication on the device: C = A * B
//! wA is A's width and wB is B's width
////////////////////////////////////////////////////////////////////////////////
template <int block_size, typename size_type>
__device__ void matrixMul(float *C, float *A, float *B, size_type wA,
size_type wB) {
// Block index
size_type bx = blockIdx.x;
size_type by = blockIdx.y;
// Thread index
size_type tx = threadIdx.x;
size_type ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
size_type aBegin = wA * block_size * by;
// Index of the last sub-matrix of A processed by the block
size_type aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
size_type aStep = block_size;
// Index of the first sub-matrix of B processed by the block
size_type bBegin = block_size * bx;
// Step size used to iterate through the sub-matrices of B
size_type bStep = block_size * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (size_type a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[block_size][block_size];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[block_size][block_size];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
AS(ty, tx) = A[a + wA * ty + tx];
BS(ty, tx) = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (size_type k = 0; k < block_size; ++k) Csub += AS(ty, k) * BS(k, tx);
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
size_type c = wB * block_size * by + block_size * bx;
C[c + wB * ty + tx] = Csub;
}
// C wrappers around our template kernel
extern "C" __global__ void matrixMul_bs8_64bit(float *C, float *A, float *B,
size_t wA, size_t wB) {
matrixMul<8, size_t>(C, A, B, wA, wB);
}
extern "C" __global__ void matrixMul_bs16_64bit(float *C, float *A, float *B,
size_t wA, size_t wB) {
matrixMul<16, size_t>(C, A, B, wA, wB);
}
extern "C" __global__ void matrixMul_bs32_64bit(float *C, float *A, float *B,
size_t wA, size_t wB) {
matrixMul<32, size_t>(C, A, B, wA, wB);
}
#endif // #ifndef _MATRIXMUL_KERNEL_H_
|
f4a4b42c64ff6189e17a6e9e87c2d0e0c4d78a29.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Algorithm to compute the weight distribution of particles on a
grid (main() below uses 1024 particles on a 1024x1024 grid) */
//#include<conio.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define funcCheck(stmt) do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
printf( "Failed to run stmt %d ", __LINE__); \
printf( "Got CUDA error ... %s \n", hipGetErrorString(err)); \
return -1; \
} \
} while(0)
//__device__ float floorf (float x);
__global__ void parMap(float *p, float *net, int grid)
{
int rID = blockDim.x*blockIdx.x + threadIdx.x; // 1-D launch (see main): index along x, not y
float x, y; // coordinates stay floating point so the fractional weights below are non-zero
int left, right, top, bottom;
float fL,fR,fB,fT;
x = p[rID*2];
y = p[rID*2+1];
left = (int)floorf(x);
right = left + 1;
bottom = (int)floorf(y);
top = bottom +1;
if (left>= grid||right>= grid||top>= grid||bottom>= grid)
{
left=0;
right=1;
top=1;
bottom = 0;
}
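// Bilinear (cloud-in-cell) deposit: split the particle's unit weight between the four
// surrounding grid nodes according to its fractional position. Note that the updates to
// net[] are not atomic, so concurrent deposits from different particles can race;
// atomicAdd would be needed for an exact accumulation.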
fL = x - left;
fR = 1 - fL;
fB = y - bottom;
fT = 1 - fB;
net[grid*left + bottom] = net[grid*left + bottom] +(fT*fR);
net[grid*right + bottom] = net[grid*right + bottom]+(fT*fL);
net[grid*left+ top] = net[grid*left + top] +(fB*fR);
net[grid*right+ top] = net[grid*right + top] +(fB*fL);
}
// main function
int main(int argc, char *argv[])
{
int grid = 1024, i, max = grid, par=1024, sizeGrid= grid*grid, sizePar=par*2;
float netH[sizeGrid], pH[sizePar], *netD, *pD;
//netH = (float )malloc(sizeof(float)*sizeGrid);
//pH = (float )malloc(sizeof(float)*par*2);
// initialising particles
for( i = 0; i < sizePar; i++)
pH[i]= ((float)rand()/(float)(RAND_MAX) * (float)max);
printf("particle initialised \n ");
for(i=0;i<sizeGrid;i++)
netH[i]=0;
printf("Grid initialised \n ");
// Allocating GPU memory
funcCheck(hipMalloc((void **)&netD, sizeof(float)*sizeGrid));
funcCheck(hipMalloc((void **)&pD, sizeof(float)*sizePar));
printf("Cuda memory allocated \n ");
// funcCheck(hipMemcpy(netD, netH, grid*grid*sizeof(float), hipMemcpyHostToDevice));
funcCheck(hipMemcpy(pD, pH, sizePar*(sizeof(float)), hipMemcpyHostToDevice));
funcCheck(hipMemcpy(netD, netH, sizeGrid*(sizeof(float)), hipMemcpyHostToDevice));
printf("Data cpy to gpu \n \n ");
// Initialize the grid and block dimensions
dim3 dimBlock(32, 1);
dim3 dimGrid((par + 31)/32, 1); // ceiling division: just enough 32-thread blocks to cover all par particles
//@@ Launch the GPU Kernel here
// parMap expects (particles, grid, grid_size), so pass pD first and accumulate into netD
hipLaunchKernelGGL(( parMap), dim3(dimGrid), dim3(dimBlock), 0, 0, pD, netD, grid);
hipError_t err1 = hipPeekAtLastError();
printf("Data back to CPU \n \n ");
// Copy the results in GPU memory back to the CPU
funcCheck(hipMemcpy(netH, netD, sizeof(float)*sizeGrid, hipMemcpyDeviceToHost));
//!! if(x<0) stop print i
//!! denominator -- nan
FILE *f = fopen("file.txt", "w");
if (f == NULL)
{
printf("Error opening file!\n");
exit(1);
}
float temp1 = (float)par/(float)sizeGrid; // cast before dividing: integer division gave 0 and caused the nan noted above
for ( i = 0; i < sizeGrid; ++i)
{
//printf("%f ",netH[i]);
fprintf (f,"%f ",((netH[i])/temp1));
if (i%grid==0)
{
printf("\n");
fprintf (f," \n" );
}
}
fclose(f);
// Free the GPU memory
funcCheck(hipFree(netD));
funcCheck(hipFree(pD));
// free(netH);
// free(pH);
return 0;
}
| f4a4b42c64ff6189e17a6e9e87c2d0e0c4d78a29.cu | /* Algorithm to compute the weight distribution of particles on a
grid (main() below uses 1024 particles on a 1024x1024 grid) */
//#include<conio.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define funcCheck(stmt) do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
printf( "Failed to run stmt %d ", __LINE__); \
printf( "Got CUDA error ... %s \n", cudaGetErrorString(err)); \
return -1; \
} \
} while(0)
//__device__ float floorf (float x);
__global__ void parMap(float *p, float *net, int grid)
{
int rID = blockDim.x*blockIdx.x + threadIdx.x; // 1-D launch (see main): index along x, not y
float x, y; // coordinates stay floating point so the fractional weights below are non-zero
int left, right, top, bottom;
float fL,fR,fB,fT;
x = p[rID*2];
y = p[rID*2+1];
left = (int)floorf(x);
right = left + 1;
bottom = (int)floorf(y);
top = bottom +1;
if (left>= grid||right>= grid||top>= grid||bottom>= grid)
{
left=0;
right=1;
top=1;
bottom = 0;
}
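// Bilinear (cloud-in-cell) deposit: split the particle's unit weight between the four
// surrounding grid nodes according to its fractional position. Note that the updates to
// net[] are not atomic, so concurrent deposits from different particles can race;
// atomicAdd would be needed for an exact accumulation.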
fL = x - left;
fR = 1 - fL;
fB = y - bottom;
fT = 1 - fB;
net[grid*left + bottom] = net[grid*left + bottom] +(fT*fR);
net[grid*right + bottom] = net[grid*right + bottom]+(fT*fL);
net[grid*left+ top] = net[grid*left + top] +(fB*fR);
net[grid*right+ top] = net[grid*right + top] +(fB*fL);
}
// main function
int main(int argc, char *argv[])
{
int grid = 1024, i, max = grid, par=1024, sizeGrid= grid*grid, sizePar=par*2;
float netH[sizeGrid], pH[sizePar], *netD, *pD;
//netH = (float )malloc(sizeof(float)*sizeGrid);
//pH = (float )malloc(sizeof(float)*par*2);
// initialising particles
for( i = 0; i < sizePar; i++)
pH[i]= ((float)rand()/(float)(RAND_MAX) * (float)max);
printf("particle initialised \n ");
for(i=0;i<sizeGrid;i++)
netH[i]=0;
printf("Grid initialised \n ");
// Allocating GPU memory
funcCheck(cudaMalloc((void **)&netD, sizeof(float)*sizeGrid));
funcCheck(cudaMalloc((void **)&pD, sizeof(float)*sizePar));
printf("Cuda memory allocated \n ");
// funcCheck(cudaMemcpy(netD, netH, grid*grid*sizeof(float), cudaMemcpyHostToDevice));
funcCheck(cudaMemcpy(pD, pH, sizePar*(sizeof(float)), cudaMemcpyHostToDevice));
funcCheck(cudaMemcpy(netD, netH, sizeGrid*(sizeof(float)), cudaMemcpyHostToDevice));
printf("Data cpy to gpu \n \n ");
// Initialize the grid and block dimensions
dim3 dimBlock(32, 1);
dim3 dimGrid((par + 31)/32, 1); // ceiling division: just enough 32-thread blocks to cover all par particles
//@@ Launch the GPU Kernel here
// parMap expects (particles, grid, grid_size), so pass pD first and accumulate into netD
parMap<<<dimGrid, dimBlock>>>(pD, netD, grid);
cudaError_t err1 = cudaPeekAtLastError();
printf("Data back to CPU \n \n ");
// Copy the results in GPU memory back to the CPU
funcCheck(cudaMemcpy(netH, netD, sizeof(float)*sizeGrid, cudaMemcpyDeviceToHost));
//!! if(x<0) stop print i
//!! denominator -- nan
FILE *f = fopen("file.txt", "w");
if (f == NULL)
{
printf("Error opening file!\n");
exit(1);
}
float temp1 = (float)par/(float)sizeGrid; // cast before dividing: integer division gave 0 and caused the nan noted above
for ( i = 0; i < sizeGrid; ++i)
{
//printf("%f ",netH[i]);
fprintf (f,"%f ",((netH[i])/temp1));
if (i%grid==0)
{
printf("\n");
fprintf (f," \n" );
}
}
fclose(f);
// Free the GPU memory
funcCheck(cudaFree(netD));
funcCheck(cudaFree(pD));
// free(netH);
// free(pH);
return 0;
}
|
a11d165710dba77248c1c5df194fca029f725dd7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/FusedRNNKernel.cu"
#else
#include <cstdarg>
#include "../common.h"
#define TINFO TensorInfo<real, INDTYPE>
//factor will be 3 for GRU and 4 for LSTM
void THNN_(FusedRNNAssertSizes)(THCState *state, int factor, int count, ...)
{
va_list list;
va_start(list, count);
THCTensor *input = va_arg(list, THCTensor*);
THCTensor *hidden = va_arg(list, THCTensor*);
THArgCheck(THCTensor_(nElement)(state, input) ==
THCTensor_(nElement)(state, hidden),
3, "Input and Hidden tensor sizes should be the same.");
THAssertMsg(TensorUtils<THCTensor>::getDims(state, input) <= MAX_CUTORCH_DIMS,
"Tensor dimension is too large.");
THAssertMsg(TensorUtils<THCTensor>::getDims(state, hidden) <= MAX_CUTORCH_DIMS,
"Tensor dimension is too large.");
for (int arg=2; arg < count; ++arg){
THCTensor *tens = va_arg(list, THCTensor*);
THArgCheck(THCTensor_(nElement)(state, input) ==
THCTensor_(nElement)(state, tens)*factor,
3, "A pointwise tensor was not the right size, should have 1/%u the elements of input/hidden tensor.", arg, factor);
THAssertMsg(TensorUtils<THCTensor>::getDims(state, tens) <= MAX_CUTORCH_DIMS,
"Tensor dimension is too large.");
}
va_end(list);
}
int THNN_(minIndexType)(THCState *state, int count, ...)
{
va_list list;
va_start(list, count);
THCTensor* tens = va_arg(list, THCTensor*);
int startDim = TensorUtils<THCTensor>::getDims(state, tens);
bool canCollapse = THCTensor_(isContiguous)(state,tens);
for (int arg=1; arg < count; ++arg){
tens = va_arg(list, THCTensor*);
canCollapse = canCollapse && THCTensor_(isContiguous)(state, tens);
if(TensorUtils<THCTensor>::getDims(state, tens) != startDim){
va_end(list);
return -1;
}
}
va_end(list);
if(canCollapse) return -2;
return startDim;
}
bool THNN_(canUse32BitIndexMath)(THCState *state, int count, ...)
{
va_list list;
va_start(list, count);
for (int arg=0; arg < count; ++arg){
THCTensor *tens = va_arg(list, THCTensor*);
if (!TensorUtils<THCTensor>::canUse32BitIndexMath(state, tens)){
va_end(list);
return false;
}
}
va_end(list);
return true;
}
#define DEVICE_LINEAR_GET(D_TENSOR, INDEX) \
D_TENSOR.data[IndexToOffset<T, IndexType, Dims>::get(INDEX, D_TENSOR)]
#define H2F(input) ScalarConvert<real, accreal>::to(input)
#define F2H(input) ScalarConvert<accreal, real>::to(input)
template <typename T, typename IndexType, int Dims>
#if __CUDA_ARCH__ >= 350
__launch_bounds__(32 * 16, 4)
#endif
__global__ void
THNN_(GRUForward)(TensorInfo<T, IndexType> Input,
TensorInfo<T, IndexType> Hidden,
TensorInfo<T, IndexType> Bias1,
TensorInfo<T, IndexType> Bias2,
TensorInfo<T, IndexType> _hx,
TensorInfo<T, IndexType> _hy,
TensorInfo<T, IndexType> storage,
IndexType hsz,
IndexType totalElements)
{
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalElements;
linearIndex += gridDim.x * blockDim.x)
{
IndexType offset = (linearIndex/hsz)*3*hsz+linearIndex%hsz;
T ir = DEVICE_LINEAR_GET(Input, offset+0*hsz);
T ii = DEVICE_LINEAR_GET(Input, offset+1*hsz);
T in = DEVICE_LINEAR_GET(Input, offset+2*hsz);
T hr = DEVICE_LINEAR_GET(Hidden,offset+0*hsz);
T hi = DEVICE_LINEAR_GET(Hidden,offset+1*hsz);
T hn = DEVICE_LINEAR_GET(Hidden, offset+2*hsz);
T hx = DEVICE_LINEAR_GET(_hx, linearIndex);
T* hy = &DEVICE_LINEAR_GET(_hy, linearIndex);
bool has_bias = (Bias1.data != NULL);
T b1r, b1i, b1n, b2r, b2i, b2n;
if(has_bias){
b1r = DEVICE_LINEAR_GET(Bias1, linearIndex%hsz+0*hsz);
b1i = DEVICE_LINEAR_GET(Bias1, linearIndex%hsz+1*hsz);
b1n = DEVICE_LINEAR_GET(Bias1, linearIndex%hsz+2*hsz);
b2r = DEVICE_LINEAR_GET(Bias2, linearIndex%hsz+0*hsz);
b2i = DEVICE_LINEAR_GET(Bias2, linearIndex%hsz+1*hsz);
b2n = DEVICE_LINEAR_GET(Bias2, linearIndex%hsz+2*hsz);
}else{
#ifndef THC_REAL_IS_HALF
b1r = 0.0; b1i = 0.0; b1n = 0.0;
b2r = 0.0; b2i = 0.0; b2n = 0.0;
#else
b1r = F2H(0.0); b1i = F2H(0.0); b1n = F2H(0.0);
b2r = F2H(0.0); b2i = F2H(0.0); b2n = F2H(0.0);
#endif
}
offset = (linearIndex/hsz)*5*hsz+linearIndex%hsz;
accreal rg, ig, ng;
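// Standard GRU cell: rg (reset) and ig (update) are sigmoid gates, ng is the tanh
// candidate with the reset gate applied to the hidden-side contribution, and the new
// hidden state interpolates toward the previous one: hy = ng + ig * (hx - ng).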
rg = H2F(ir) + H2F(hr) + H2F(b1r) + H2F(b2r);
ig = H2F(ii) + H2F(hi) + H2F(b1i) + H2F(b2i);
TensorSigmoidOp<accreal>()(&rg, &rg);
TensorSigmoidOp<accreal>()(&ig, &ig);
ng = H2F(in) + H2F(b1n) + rg*( H2F(hn)+H2F(b2n) );
ng = THCNumerics<accreal>::tanh(ng);
*hy = F2H( ng + ig * ( H2F(hx)-ng ) );
//SAVE FOR BACKWARDS
DEVICE_LINEAR_GET(storage, offset+0*hsz) = F2H(rg);
DEVICE_LINEAR_GET(storage, offset+1*hsz) = F2H(ig);
DEVICE_LINEAR_GET(storage, offset+2*hsz) = F2H(ng);
DEVICE_LINEAR_GET(storage, offset+3*hsz) = hx;
DEVICE_LINEAR_GET(storage, offset+4*hsz) = F2H(H2F(hn) + H2F(b2n));
}
}
template <typename T, typename IndexType, int Dims>
#if __CUDA_ARCH__ >= 350
__launch_bounds__(32 * 16, 4)
#endif
__global__ void
THNN_(GRUBackward)(TensorInfo<T, IndexType> gradInInput,
TensorInfo<T, IndexType> gradInHidden,
TensorInfo<T, IndexType> gradOutput,
TensorInfo<T, IndexType> gradInputHx,
TensorInfo<T, IndexType> storage,
IndexType hsz,
IndexType totalElements)
{
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalElements;
linearIndex += gridDim.x * blockDim.x) {
IndexType offset = (linearIndex/hsz)*5*hsz+linearIndex%hsz;
T rg = DEVICE_LINEAR_GET(storage, offset+0*hsz);
T ig = DEVICE_LINEAR_GET(storage, offset+1*hsz);
T ng = DEVICE_LINEAR_GET(storage, offset+2*hsz);
T hx = DEVICE_LINEAR_GET(storage, offset+3*hsz);
T hn = DEVICE_LINEAR_GET(storage, offset+4*hsz);
T go = DEVICE_LINEAR_GET(gradOutput, linearIndex);
offset = (linearIndex/hsz)*3*hsz+linearIndex%hsz;
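// Backward pass of hy = ng + ig*(hx - ng), using the activations saved in storage during
// the forward pass (rg, ig, ng, hx and hn + b2n); the sigmoid/tanh derivatives are folded
// into the gate gradients below.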
accreal gig = H2F(go)*( H2F(hx)-H2F(ng) )*( 1-H2F(ig) )*H2F(ig);
accreal ghx = H2F(go)*H2F(ig);
accreal gin = H2F(go)*( 1-H2F(ig) )*( 1-H2F(ng)*H2F(ng) );
accreal ghn = gin * H2F(rg);
accreal grg = gin *H2F(hn)*( 1-H2F(rg) )*H2F(rg);
DEVICE_LINEAR_GET(gradInInput, offset+0*hsz) = F2H(grg);
DEVICE_LINEAR_GET(gradInInput, offset+1*hsz) = F2H(gig);
DEVICE_LINEAR_GET(gradInInput, offset+2*hsz) = F2H(gin);
DEVICE_LINEAR_GET(gradInHidden, offset+0*hsz) = F2H(grg);
DEVICE_LINEAR_GET(gradInHidden, offset+1*hsz) = F2H(gig);
DEVICE_LINEAR_GET(gradInHidden, offset+2*hsz) = F2H(ghn);
DEVICE_LINEAR_GET(gradInputHx, linearIndex) = F2H(ghx);
}
}
template <typename T, typename IndexType, int Dims>
#if __CUDA_ARCH__ >= 350
__launch_bounds__(32 * 16, 4)
#endif
__global__ void
THNN_(LSTMForward)(TensorInfo<T, IndexType> input,
TensorInfo<T, IndexType> hidden,
TensorInfo<T, IndexType> bias1,
TensorInfo<T, IndexType> bias2,
TensorInfo<T, IndexType> _cx,
TensorInfo<T, IndexType> _hy,
TensorInfo<T, IndexType> _cy,
IndexType hsz,
IndexType totalElements)
{
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalElements;
linearIndex += gridDim.x * blockDim.x)
{
IndexType offset = (linearIndex/hsz)*4*hsz+linearIndex%hsz;
T* iig = &DEVICE_LINEAR_GET(input, offset+0*hsz);
T* ifg = &DEVICE_LINEAR_GET(input, offset+1*hsz);
T* icg = &DEVICE_LINEAR_GET(input, offset+2*hsz);
T* iog = &DEVICE_LINEAR_GET(input, offset+3*hsz);
T hig = DEVICE_LINEAR_GET(hidden, offset+0*hsz);
T hfg = DEVICE_LINEAR_GET(hidden, offset+1*hsz);
T hcg = DEVICE_LINEAR_GET(hidden, offset+2*hsz);
T hog = DEVICE_LINEAR_GET(hidden, offset+3*hsz);
T cx = DEVICE_LINEAR_GET(_cx, linearIndex);
T* hy = &DEVICE_LINEAR_GET(_hy, linearIndex);
T* cy = &DEVICE_LINEAR_GET(_cy, linearIndex);
bool has_bias = (bias1.data != NULL);
T b1i, b1f, b1c, b1o;
T b2i, b2f, b2c, b2o;
if(has_bias){
b1i = DEVICE_LINEAR_GET(bias1, linearIndex%hsz+0*hsz);
b1f = DEVICE_LINEAR_GET(bias1, linearIndex%hsz+1*hsz);
b1c = DEVICE_LINEAR_GET(bias1, linearIndex%hsz+2*hsz);
b1o = DEVICE_LINEAR_GET(bias1, linearIndex%hsz+3*hsz);
b2i = DEVICE_LINEAR_GET(bias2, linearIndex%hsz+0*hsz);
b2f = DEVICE_LINEAR_GET(bias2, linearIndex%hsz+1*hsz);
b2c = DEVICE_LINEAR_GET(bias2, linearIndex%hsz+2*hsz);
b2o = DEVICE_LINEAR_GET(bias2, linearIndex%hsz+3*hsz);
}else{
#ifndef THC_REAL_IS_HALF
b1i = 0.0; b1f = 0.0; b1c = 0.0; b1o = 0.0;
b2i = 0.0; b2f = 0.0; b2c = 0.0; b2o = 0.0;
#else
b1i = F2H(0.0); b1f = F2H(0.0); b1c = F2H(0.0); b1o = F2H(0.0);
b2i = F2H(0.0); b2f = F2H(0.0); b2c = F2H(0.0); b2o = F2H(0.0);
#endif
}
accreal ig, fg, cg, og;
accreal f_hy, f_cy;
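// Standard LSTM cell: ig/fg/og are sigmoid gates, cg is the tanh candidate, then
//   cy = fg*cx + ig*cg   and   hy = og*tanh(cy).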
ig = H2F(*iig) + H2F(hig) + H2F(b1i) + H2F(b2i);
fg = H2F(*ifg) + H2F(hfg) + H2F(b1f) + H2F(b2f);
cg = H2F(*icg) + H2F(hcg) + H2F(b1c) + H2F(b2c);
og = H2F(*iog) + H2F(hog) + H2F(b1o) + H2F(b2o);
TensorSigmoidOp<accreal>()(&ig, &ig);
TensorSigmoidOp<accreal>()(&fg, &fg);
cg = THCNumerics<accreal>::tanh(cg);
TensorSigmoidOp<accreal>()(&og, &og);
f_cy = (fg * H2F(cx) ) + (ig * cg);
f_hy = og * THCNumerics<accreal>::tanh(f_cy);
*hy = F2H(f_hy);
*cy = F2H(f_cy);
//SAVE FOR BACKWARDS
//Also need cy and cx but can be saved easily in python
*iig = F2H(ig);
*ifg = F2H(fg);
*icg = F2H(cg);
*iog = F2H(og);
}
}
template <typename T, typename IndexType, int Dims>
#if __CUDA_ARCH__ >= 350
__launch_bounds__(32 * 16, 4)
#endif
__global__ void
THNN_(LSTMBackward)(TensorInfo<T, IndexType> storage,
TensorInfo<T, IndexType> gradInGates,
TensorInfo<T, IndexType> _cx,
TensorInfo<T, IndexType> _cy,
TensorInfo<T, IndexType> gradoutput,
TensorInfo<T, IndexType> gradoutputcell,
TensorInfo<T, IndexType> gradInputCx,
IndexType hsz,
IndexType totalElements)
{
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalElements;
linearIndex += gridDim.x * blockDim.x) {
IndexType offset = (linearIndex/hsz)*4*hsz+linearIndex%hsz;
T ig = DEVICE_LINEAR_GET(storage, offset+0*hsz);
T fg = DEVICE_LINEAR_GET(storage, offset+1*hsz);
T cg = DEVICE_LINEAR_GET(storage, offset+2*hsz);
T og = DEVICE_LINEAR_GET(storage, offset+3*hsz);
T* ih = &DEVICE_LINEAR_GET(gradInGates, offset+0*hsz);
T* fh = &DEVICE_LINEAR_GET(gradInGates, offset+1*hsz);
T* ch = &DEVICE_LINEAR_GET(gradInGates, offset+2*hsz);
T* oh = &DEVICE_LINEAR_GET(gradInGates, offset+3*hsz);
//will return hidden grads here
T cx = DEVICE_LINEAR_GET(_cx, linearIndex);
T cy = DEVICE_LINEAR_GET(_cy, linearIndex);
T* gi = &DEVICE_LINEAR_GET(gradInputCx, linearIndex);
T go = DEVICE_LINEAR_GET(gradoutput, linearIndex);
T goc= DEVICE_LINEAR_GET(gradoutputcell, linearIndex);
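// Backward pass of hy = og*tanh(cy) and cy = fg*cx + ig*cg: go is the incoming gradient
// w.r.t. hy and goc the gradient w.r.t. cy; the gate gradients below fold in the
// sigmoid/tanh derivatives and are returned through gradInGates.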
accreal gcx = THCNumerics<accreal>::tanh(H2F(cy));
accreal gog = H2F(go) * gcx;
gcx = H2F(go) * H2F(og) * ( 1 - gcx*gcx) + H2F(goc);
accreal gig = gcx * H2F(cg);
accreal gfg = gcx * H2F(cx);
accreal gcg = gcx * H2F(ig);
gcx = gcx * H2F(fg);
gig = gig * (1-H2F(ig)) * H2F(ig);
gfg = gfg * (1-H2F(fg)) * H2F(fg);
gcg = gcg * (1-H2F(cg)*H2F(cg));
gog = gog * (1-H2F(og)) * H2F(og);
*ih = F2H(gig);
*fh = F2H(gfg);
*ch = F2H(gcg);
*oh = F2H(gog);
*gi = F2H(gcx);
}
}
// ************ START Create function calls ********** //
#define FILL_FUNCTION(ITYPE, DIM, FUNCTION) FUNCTION(ITYPE, DIM)
#define FILL_DIM(ITYPE, DIM, FUNCTION) \
switch (DIM) { \
case -2: \
FILL_FUNCTION(ITYPE, -2, FUNCTION); \
break; \
case 1: \
FILL_FUNCTION(ITYPE, 1, FUNCTION); \
break; \
case 2: \
FILL_FUNCTION(ITYPE, 2, FUNCTION); \
break; \
default: \
FILL_FUNCTION(ITYPE, -1, FUNCTION); \
break; \
}
#define LSTM_FORWARD(ITYPE, DIM)hipLaunchKernelGGL(( THNN_(LSTMForward) \
<real, ITYPE, DIM>) \
, dim3(grid), dim3(block), 0, THCState_getCurrentStreamOnDevice(state, curDevice), \
inputI, hiddenI, \
bias1I, bias2I, cxI, hyI, cyI, \
hid_size, totalElements);
#define LSTM_BACKWARD(ITYPE, DIM)hipLaunchKernelGGL(( THNN_(LSTMBackward) \
<real, ITYPE, DIM>) \
, dim3(grid), dim3(block), 0, THCState_getCurrentStreamOnDevice(state, curDevice), \
storageI, gradingatesI, cxI, cyI, \
gradoutI, gradoutcI, gradincxI, \
hid_size, totalElements);
#define GRU_FORWARD(ITYPE, DIM)hipLaunchKernelGGL(( THNN_(GRUForward)<real, ITYPE, DIM>) \
, dim3(grid), dim3(block), 0, THCState_getCurrentStreamOnDevice(state, curDevice), \
inputI, hiddenI, bias1I, bias2I, hxI, hyI, storageI, \
hid_size, totalElements);
#define GRU_BACKWARD(ITYPE, DIM)hipLaunchKernelGGL(( THNN_(GRUBackward) \
<real, ITYPE, DIM>) \
, dim3(grid), dim3(block), 0, THCState_getCurrentStreamOnDevice(state, curDevice), \
gradininputI, gradinhiddenI, gradoutI, gradinhxI, storageI, \
hid_size, totalElements);
// ************ END Create actual function calls ************ //
template<typename INDTYPE>
void THNN_(LSTM_forw_ind_wrap)(
THCState *state,
THCTensor *input,
THCTensor *hidden,
THCTensor *bias1,
THCTensor *bias2,
THCTensor *cx,
THCTensor *hy,
THCTensor *cy)
{
bool has_bias = (bias1!=NULL);
int maxDim;
if(has_bias){
THCUNN_assertSameGPU(state, 7, input, hidden, bias1, bias2, hy, cy, cx);
maxDim = THNN_(minIndexType)
(state, 7, input, hidden, bias1, bias2, hy, cy, cx);
}else{
THCUNN_assertSameGPU(state, 5, input, hidden, hy, cy, cx);
maxDim = THNN_(minIndexType)
(state, 5, input, hidden, hy, cy, cx);
}
ptrdiff_t totalElements = TensorUtils<THCTensor>::getNumElements(state, cx);
const dim3 block = getApplyBlock();
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
THAssertMsg(getApplyGrid(state, totalElements, grid, curDevice),
"Could not get grid size for pointwise apply.");
TINFO inputI = getTensorInfo<real, THCTensor, INDTYPE>(state, input);
TINFO hiddenI = getTensorInfo<real, THCTensor, INDTYPE>(state, hidden);
TINFO cxI = getTensorInfo<real, THCTensor, INDTYPE>(state, cx);
TINFO hyI = getTensorInfo<real, THCTensor, INDTYPE>(state, hy);
TINFO cyI = getTensorInfo<real, THCTensor, INDTYPE>(state, cy);
INDTYPE hid_size = cxI.sizes[cxI.dims-1];
if(has_bias){
THAssertMsg( hid_size*4 == static_cast<INDTYPE>(THCTensor_(nElement)(state, bias1)) &&
hid_size*4 == static_cast<INDTYPE>(THCTensor_(nElement)(state, bias2)),
"Bias in pointwise operation is an incorrect size, must be 4 x feature size.");
}
if(maxDim == -2){
inputI.collapseDims();
hiddenI.collapseDims();
cxI.collapseDims();
hyI.collapseDims();
cyI.collapseDims();
}
INDTYPE zero[1] = {0};
TINFO nullinfo = TINFO(NULL, 1, zero, zero);
TINFO bias1I = nullinfo;
TINFO bias2I = nullinfo;
if(has_bias){
bias1I = getTensorInfo<real, THCTensor, INDTYPE>(state, bias1);
bias2I = getTensorInfo<real, THCTensor, INDTYPE>(state, bias2);
if(maxDim == -2){
bias1I.collapseDims();
bias2I.collapseDims();
}
}
FILL_DIM(INDTYPE, maxDim, LSTM_FORWARD);
}
void THNN_(LSTMFused_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *hidden,
THCTensor *bias1,
THCTensor *bias2,
THCTensor *cx,
THCTensor *hy,
THCTensor *cy)
{
THCTensor_(resizeAs)(state, hy, cx);
THCTensor_(resizeAs)(state, cy, cx);
THNN_(FusedRNNAssertSizes)(state, 4, 5, input, hidden, hy, cy, cx);
bool has_bias = (bias1!=NULL);
bool canUse32bi;
if(has_bias){
canUse32bi = THNN_(canUse32BitIndexMath)
(state, 7, input, hidden, bias1, bias2, hy, cy, cx);
}else{
canUse32bi = THNN_(canUse32BitIndexMath)
(state, 5, input, hidden, hy, cy, cx);
}
if(canUse32bi){
THNN_(LSTM_forw_ind_wrap)<uint32_t>
(state, input, hidden, bias1, bias2, cx, hy, cy);
}else{
THNN_(LSTM_forw_ind_wrap)<uint64_t>
(state, input, hidden, bias1, bias2, cx, hy, cy);
}
THCudaCheck(hipGetLastError());
}
template<typename INDTYPE>
void THNN_(LSTM_back_ind_wrap)(
THCState *state,
THCTensor *storage,
THCTensor *gradInGates,
THCTensor *cx,
THCTensor *cy,
THCTensor *gradOutput,
THCTensor *gradOutputCell,
THCTensor *gradInputCx)
{
int maxDim = THNN_(minIndexType)
(state, 7, storage, gradInGates, cx, cy,
gradOutput, gradOutputCell, gradInputCx);
ptrdiff_t totalElements = TensorUtils<THCTensor>::getNumElements(state, gradOutput);
const dim3 block = getApplyBlock();
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
THAssertMsg(getApplyGrid(state, totalElements, grid, curDevice),
"Could not get grid size for pointwise apply");
TINFO storageI = getTensorInfo<real, THCTensor, INDTYPE>(state, storage);
TINFO gradingatesI = getTensorInfo<real, THCTensor, INDTYPE>(state, gradInGates);
TINFO cxI = getTensorInfo<real, THCTensor, INDTYPE>(state, cx);
TINFO cyI = getTensorInfo<real, THCTensor, INDTYPE>(state, cy);
TINFO gradoutI = getTensorInfo<real, THCTensor, INDTYPE>(state, gradOutput);
TINFO gradoutcI = getTensorInfo<real, THCTensor, INDTYPE>(state, gradOutputCell);
TINFO gradincxI = getTensorInfo<real, THCTensor, INDTYPE>(state, gradInputCx);
INDTYPE hid_size = gradoutI.sizes[gradoutI.dims-1];
if(maxDim == -2){
storageI.collapseDims();
gradingatesI.collapseDims();
cxI.collapseDims();
cyI.collapseDims();
gradoutI.collapseDims();
gradoutcI.collapseDims();
gradincxI.collapseDims();
}
FILL_DIM(INDTYPE, maxDim, LSTM_BACKWARD);
}
void THNN_(LSTMFused_updateGradInput)(
THCState *state,
THCTensor *storage,
THCTensor *gradInGates,
THCTensor *cx,
THCTensor *cy,
THCTensor *gradOutput,
THCTensor *gradOutputCell,
THCTensor *gradInputCx)
{
THCTensor_(resizeAs)(state, gradInputCx, gradOutput);
THCUNN_assertSameGPU(state, 7, storage, gradInGates, cx, cy,
gradOutput, gradOutputCell, gradInputCx);
THNN_(FusedRNNAssertSizes)
(state, 4, 7, storage, gradInGates, cx, cy,
gradOutput, gradOutputCell, gradInputCx);
bool canUse32bi = THNN_(canUse32BitIndexMath)
(state, 7, storage, gradInGates, cx, cy,
gradOutput, gradOutputCell, gradInputCx);
if(canUse32bi){
THNN_(LSTM_back_ind_wrap)<uint32_t>
(state, storage, gradInGates, cx, cy,
gradOutput, gradOutputCell, gradInputCx);
}else{
THNN_(LSTM_back_ind_wrap)<uint64_t>
(state, storage, gradInGates, cx, cy,
gradOutput, gradOutputCell, gradInputCx);
}
THCudaCheck(hipGetLastError());
}
template<typename INDTYPE>
void THNN_(GRU_forw_ind_wrap)(
THCState *state,
THCTensor *input,
THCTensor *hidden,
THCTensor *bias1,
THCTensor *bias2,
THCTensor *hx,
THCTensor *hy,
THCTensor *storage)
{
bool has_bias = (bias1!=NULL);
int maxDim;
if(has_bias){
THCUNN_assertSameGPU
(state, 7, input, hidden, hx, hy, bias1, bias2, storage);
maxDim = THNN_(minIndexType)
(state, 7, input, hidden, hx, hy, bias1, bias2, storage);
}else{
THCUNN_assertSameGPU
(state, 5, input, hidden, hx, hy, storage);
maxDim = THNN_(minIndexType)
(state, 5, input, hidden, hx, hy, storage);
}
ptrdiff_t totalElements = TensorUtils<THCTensor>::getNumElements(state, hx);
const dim3 block = getApplyBlock();
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
THAssertMsg(getApplyGrid(state, totalElements, grid, curDevice),
"Could not get grid size for pointwise apply.");
TINFO inputI = getTensorInfo<real, THCTensor, INDTYPE>(state, input);
TINFO hiddenI = getTensorInfo<real, THCTensor, INDTYPE>(state, hidden);
TINFO hxI = getTensorInfo<real, THCTensor, INDTYPE>(state, hx);
TINFO hyI = getTensorInfo<real, THCTensor, INDTYPE>(state, hy);
TINFO storageI = getTensorInfo<real, THCTensor, INDTYPE>(state, storage);
INDTYPE hid_size = hxI.sizes[hxI.dims-1];
if(has_bias){
THAssertMsg( hid_size*3 == static_cast<INDTYPE>(THCTensor_(nElement)(state, bias1)) &&
hid_size*3 == static_cast<INDTYPE>(THCTensor_(nElement)(state, bias2)),
"Bias in pointwise operation is an incorrect size, must be 3 x feature size.");
}
if(maxDim == -2){
inputI.collapseDims();
hiddenI.collapseDims();
hyI.collapseDims();
hxI.collapseDims();
storageI.collapseDims();
}
INDTYPE zero[1] = {0};
TINFO nullinfo = TINFO(NULL, 1, zero, zero);
TINFO bias1I = nullinfo;
TINFO bias2I = nullinfo;
if(has_bias){
bias1I = getTensorInfo<real, THCTensor, INDTYPE>(state, bias1);
bias2I = getTensorInfo<real, THCTensor, INDTYPE>(state, bias2);
if(maxDim == -2){
bias1I.collapseDims();
bias2I.collapseDims();
}
}
FILL_DIM(INDTYPE, maxDim, GRU_FORWARD);
}
void THNN_(GRUFused_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *hidden,
THCTensor *bias1,
THCTensor *bias2,
THCTensor *hx,
THCTensor *hy,
THCTensor *storage)
{
THCTensor_(resizeAs)(state, hy, hx);
THNN_(FusedRNNAssertSizes)(state, 3, 4, input, hidden, hx, hy);
THArgCheck(THCTensor_(nElement)(state, storage) ==
THCTensor_(nElement)(state, hx)*5,
3, "Storage tensor for fused kernel was not sized correctly.");
bool has_bias = (bias1!=NULL);
bool canUse32bi;
if(has_bias){
canUse32bi = THNN_(canUse32BitIndexMath)
(state, 7, input, hidden, hx, hy, bias1, bias2, storage);
}else{
canUse32bi = THNN_(canUse32BitIndexMath)
(state, 5, input, hidden, hx, hy, storage);
}
if(canUse32bi){
THNN_(GRU_forw_ind_wrap)<uint32_t>
(state, input, hidden, bias1, bias2, hx, hy, storage);
}else{
THNN_(GRU_forw_ind_wrap)<uint64_t>
(state, input, hidden, bias1, bias2, hx, hy, storage);
}
THCudaCheck(hipGetLastError());
}
template<typename INDTYPE>
void THNN_(GRU_back_ind_wrap)(
THCState *state,
THCTensor *gradInInput,
THCTensor *gradInHidden,
THCTensor *gradOutput,
THCTensor *gradInputHx,
THCTensor *storage)
{
int maxDim = THNN_(minIndexType)(state, 5, gradInInput, gradInHidden, gradOutput,
gradInputHx, storage);
ptrdiff_t totalElements = TensorUtils<THCTensor>::getNumElements(state, gradOutput);
const dim3 block = getApplyBlock();
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
THAssertMsg(getApplyGrid(state, totalElements, grid, curDevice),
"Could not get grid size for pointwise apply");
TINFO gradininputI = getTensorInfo<real, THCTensor, INDTYPE>(state, gradInInput);
TINFO gradinhiddenI = getTensorInfo<real, THCTensor, INDTYPE>(state, gradInHidden);
TINFO gradoutI = getTensorInfo<real, THCTensor, INDTYPE>(state, gradOutput);
TINFO gradinhxI = getTensorInfo<real, THCTensor, INDTYPE>(state, gradInputHx);
TINFO storageI = getTensorInfo<real, THCTensor, INDTYPE>(state, storage);
INDTYPE hid_size = gradoutI.sizes[gradoutI.dims-1];
if(maxDim == -2){
gradininputI.collapseDims();
gradinhiddenI.collapseDims();
gradoutI.collapseDims();
gradinhxI.collapseDims();
storageI.collapseDims();
}
FILL_DIM(INDTYPE, maxDim, GRU_BACKWARD);
}
void THNN_(GRUFused_updateGradInput)(
THCState *state,
THCTensor *gradInInput,
THCTensor *gradInHidden,
THCTensor *gradOutput,
THCTensor *gradInputHx,
THCTensor *storage)
{
THCTensor_(resizeAs)(state, gradInputHx, gradOutput);
THCUNN_assertSameGPU(state, 5, gradInInput, gradInHidden, gradOutput, gradInputHx, storage);
THNN_(FusedRNNAssertSizes)(state, 3, 4, gradInInput, gradInHidden, gradOutput, gradInputHx);
bool canUse32bi = THNN_(canUse32BitIndexMath)(state, 5, gradInInput, gradInHidden,
gradOutput, gradInputHx, storage);
if(canUse32bi){
THNN_(GRU_back_ind_wrap)<uint32_t>
(state, gradInInput, gradInHidden, gradOutput, gradInputHx, storage);
}else{
THNN_(GRU_back_ind_wrap)<uint64_t>
(state, gradInInput, gradInHidden, gradOutput, gradInputHx, storage);
}
THCudaCheck(hipGetLastError());
}
//Clean up compiler namespace
#undef DEVICE_LINEAR_GET
#undef H2F
#undef F2H
#undef EXPAND_FUNCTION
#undef EXPAND_DIM
#undef EXPAND_TYPE
#undef FILL_TYPES_FORWARD
#undef FILL_FORWARD
#undef FILL_TYPES_BACKWARD
#undef FILL_BACKWARD
#endif
| a11d165710dba77248c1c5df194fca029f725dd7.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/FusedRNNKernel.cu"
#else
#include <cstdarg>
#include "../common.h"
#define TINFO TensorInfo<real, INDTYPE>
//factor will be 3 for GRU and 4 for LSTM
void THNN_(FusedRNNAssertSizes)(THCState *state, int factor, int count, ...)
{
va_list list;
va_start(list, count);
THCTensor *input = va_arg(list, THCTensor*);
THCTensor *hidden = va_arg(list, THCTensor*);
THArgCheck(THCTensor_(nElement)(state, input) ==
THCTensor_(nElement)(state, hidden),
3, "Input and Hidden tensor sizes should be the same.");
THAssertMsg(TensorUtils<THCTensor>::getDims(state, input) <= MAX_CUTORCH_DIMS,
"Tensor dimension is too large.");
THAssertMsg(TensorUtils<THCTensor>::getDims(state, hidden) <= MAX_CUTORCH_DIMS,
"Tensor dimension is too large.");
for (int arg=2; arg < count; ++arg){
THCTensor *tens = va_arg(list, THCTensor*);
THArgCheck(THCTensor_(nElement)(state, input) ==
THCTensor_(nElement)(state, tens)*factor,
3, "A pointwise tensor was not the right size, should have 1/%u the elements of input/hidden tensor.", arg, factor);
THAssertMsg(TensorUtils<THCTensor>::getDims(state, tens) <= MAX_CUTORCH_DIMS,
"Tensor dimension is too large.");
}
va_end(list);
}
int THNN_(minIndexType)(THCState *state, int count, ...)
{
va_list list;
va_start(list, count);
THCTensor* tens = va_arg(list, THCTensor*);
int startDim = TensorUtils<THCTensor>::getDims(state, tens);
bool canCollapse = THCTensor_(isContiguous)(state,tens);
for (int arg=1; arg < count; ++arg){
tens = va_arg(list, THCTensor*);
canCollapse = canCollapse && THCTensor_(isContiguous)(state, tens);
if(TensorUtils<THCTensor>::getDims(state, tens) != startDim){
va_end(list);
return -1;
}
}
va_end(list);
if(canCollapse) return -2;
return startDim;
}
bool THNN_(canUse32BitIndexMath)(THCState *state, int count, ...)
{
va_list list;
va_start(list, count);
for (int arg=0; arg < count; ++arg){
THCTensor *tens = va_arg(list, THCTensor*);
if (!TensorUtils<THCTensor>::canUse32BitIndexMath(state, tens)){
va_end(list);
return false;
}
}
va_end(list);
return true;
}
#define DEVICE_LINEAR_GET(D_TENSOR, INDEX) \
D_TENSOR.data[IndexToOffset<T, IndexType, Dims>::get(INDEX, D_TENSOR)]
#define H2F(input) ScalarConvert<real, accreal>::to(input)
#define F2H(input) ScalarConvert<accreal, real>::to(input)
template <typename T, typename IndexType, int Dims>
#if __CUDA_ARCH__ >= 350
__launch_bounds__(32 * 16, 4)
#endif
__global__ void
THNN_(GRUForward)(TensorInfo<T, IndexType> Input,
TensorInfo<T, IndexType> Hidden,
TensorInfo<T, IndexType> Bias1,
TensorInfo<T, IndexType> Bias2,
TensorInfo<T, IndexType> _hx,
TensorInfo<T, IndexType> _hy,
TensorInfo<T, IndexType> storage,
IndexType hsz,
IndexType totalElements)
{
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalElements;
linearIndex += gridDim.x * blockDim.x)
{
IndexType offset = (linearIndex/hsz)*3*hsz+linearIndex%hsz;
T ir = DEVICE_LINEAR_GET(Input, offset+0*hsz);
T ii = DEVICE_LINEAR_GET(Input, offset+1*hsz);
T in = DEVICE_LINEAR_GET(Input, offset+2*hsz);
T hr = DEVICE_LINEAR_GET(Hidden,offset+0*hsz);
T hi = DEVICE_LINEAR_GET(Hidden,offset+1*hsz);
T hn = DEVICE_LINEAR_GET(Hidden, offset+2*hsz);
T hx = DEVICE_LINEAR_GET(_hx, linearIndex);
T* hy = &DEVICE_LINEAR_GET(_hy, linearIndex);
bool has_bias = (Bias1.data != NULL);
T b1r, b1i, b1n, b2r, b2i, b2n;
if(has_bias){
b1r = DEVICE_LINEAR_GET(Bias1, linearIndex%hsz+0*hsz);
b1i = DEVICE_LINEAR_GET(Bias1, linearIndex%hsz+1*hsz);
b1n = DEVICE_LINEAR_GET(Bias1, linearIndex%hsz+2*hsz);
b2r = DEVICE_LINEAR_GET(Bias2, linearIndex%hsz+0*hsz);
b2i = DEVICE_LINEAR_GET(Bias2, linearIndex%hsz+1*hsz);
b2n = DEVICE_LINEAR_GET(Bias2, linearIndex%hsz+2*hsz);
}else{
#ifndef THC_REAL_IS_HALF
b1r = 0.0; b1i = 0.0; b1n = 0.0;
b2r = 0.0; b2i = 0.0; b2n = 0.0;
#else
b1r = F2H(0.0); b1i = F2H(0.0); b1n = F2H(0.0);
b2r = F2H(0.0); b2i = F2H(0.0); b2n = F2H(0.0);
#endif
}
offset = (linearIndex/hsz)*5*hsz+linearIndex%hsz;
accreal rg, ig, ng;
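// Standard GRU cell: rg (reset) and ig (update) are sigmoid gates, ng is the tanh
// candidate with the reset gate applied to the hidden-side contribution, and the new
// hidden state interpolates toward the previous one: hy = ng + ig * (hx - ng).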
rg = H2F(ir) + H2F(hr) + H2F(b1r) + H2F(b2r);
ig = H2F(ii) + H2F(hi) + H2F(b1i) + H2F(b2i);
TensorSigmoidOp<accreal>()(&rg, &rg);
TensorSigmoidOp<accreal>()(&ig, &ig);
ng = H2F(in) + H2F(b1n) + rg*( H2F(hn)+H2F(b2n) );
ng = THCNumerics<accreal>::tanh(ng);
*hy = F2H( ng + ig * ( H2F(hx)-ng ) );
//SAVE FOR BACKWARDS
DEVICE_LINEAR_GET(storage, offset+0*hsz) = F2H(rg);
DEVICE_LINEAR_GET(storage, offset+1*hsz) = F2H(ig);
DEVICE_LINEAR_GET(storage, offset+2*hsz) = F2H(ng);
DEVICE_LINEAR_GET(storage, offset+3*hsz) = hx;
DEVICE_LINEAR_GET(storage, offset+4*hsz) = F2H(H2F(hn) + H2F(b2n));
}
}
template <typename T, typename IndexType, int Dims>
#if __CUDA_ARCH__ >= 350
__launch_bounds__(32 * 16, 4)
#endif
__global__ void
THNN_(GRUBackward)(TensorInfo<T, IndexType> gradInInput,
TensorInfo<T, IndexType> gradInHidden,
TensorInfo<T, IndexType> gradOutput,
TensorInfo<T, IndexType> gradInputHx,
TensorInfo<T, IndexType> storage,
IndexType hsz,
IndexType totalElements)
{
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalElements;
linearIndex += gridDim.x * blockDim.x) {
IndexType offset = (linearIndex/hsz)*5*hsz+linearIndex%hsz;
T rg = DEVICE_LINEAR_GET(storage, offset+0*hsz);
T ig = DEVICE_LINEAR_GET(storage, offset+1*hsz);
T ng = DEVICE_LINEAR_GET(storage, offset+2*hsz);
T hx = DEVICE_LINEAR_GET(storage, offset+3*hsz);
T hn = DEVICE_LINEAR_GET(storage, offset+4*hsz);
T go = DEVICE_LINEAR_GET(gradOutput, linearIndex);
offset = (linearIndex/hsz)*3*hsz+linearIndex%hsz;
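// Backward pass of hy = ng + ig*(hx - ng), using the activations saved in storage during
// the forward pass (rg, ig, ng, hx and hn + b2n); the sigmoid/tanh derivatives are folded
// into the gate gradients below.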
accreal gig = H2F(go)*( H2F(hx)-H2F(ng) )*( 1-H2F(ig) )*H2F(ig);
accreal ghx = H2F(go)*H2F(ig);
accreal gin = H2F(go)*( 1-H2F(ig) )*( 1-H2F(ng)*H2F(ng) );
accreal ghn = gin * H2F(rg);
accreal grg = gin *H2F(hn)*( 1-H2F(rg) )*H2F(rg);
DEVICE_LINEAR_GET(gradInInput, offset+0*hsz) = F2H(grg);
DEVICE_LINEAR_GET(gradInInput, offset+1*hsz) = F2H(gig);
DEVICE_LINEAR_GET(gradInInput, offset+2*hsz) = F2H(gin);
DEVICE_LINEAR_GET(gradInHidden, offset+0*hsz) = F2H(grg);
DEVICE_LINEAR_GET(gradInHidden, offset+1*hsz) = F2H(gig);
DEVICE_LINEAR_GET(gradInHidden, offset+2*hsz) = F2H(ghn);
DEVICE_LINEAR_GET(gradInputHx, linearIndex) = F2H(ghx);
}
}
template <typename T, typename IndexType, int Dims>
#if __CUDA_ARCH__ >= 350
__launch_bounds__(32 * 16, 4)
#endif
__global__ void
THNN_(LSTMForward)(TensorInfo<T, IndexType> input,
TensorInfo<T, IndexType> hidden,
TensorInfo<T, IndexType> bias1,
TensorInfo<T, IndexType> bias2,
TensorInfo<T, IndexType> _cx,
TensorInfo<T, IndexType> _hy,
TensorInfo<T, IndexType> _cy,
IndexType hsz,
IndexType totalElements)
{
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalElements;
linearIndex += gridDim.x * blockDim.x)
{
IndexType offset = (linearIndex/hsz)*4*hsz+linearIndex%hsz;
T* iig = &DEVICE_LINEAR_GET(input, offset+0*hsz);
T* ifg = &DEVICE_LINEAR_GET(input, offset+1*hsz);
T* icg = &DEVICE_LINEAR_GET(input, offset+2*hsz);
T* iog = &DEVICE_LINEAR_GET(input, offset+3*hsz);
T hig = DEVICE_LINEAR_GET(hidden, offset+0*hsz);
T hfg = DEVICE_LINEAR_GET(hidden, offset+1*hsz);
T hcg = DEVICE_LINEAR_GET(hidden, offset+2*hsz);
T hog = DEVICE_LINEAR_GET(hidden, offset+3*hsz);
T cx = DEVICE_LINEAR_GET(_cx, linearIndex);
T* hy = &DEVICE_LINEAR_GET(_hy, linearIndex);
T* cy = &DEVICE_LINEAR_GET(_cy, linearIndex);
bool has_bias = (bias1.data != NULL);
T b1i, b1f, b1c, b1o;
T b2i, b2f, b2c, b2o;
if(has_bias){
b1i = DEVICE_LINEAR_GET(bias1, linearIndex%hsz+0*hsz);
b1f = DEVICE_LINEAR_GET(bias1, linearIndex%hsz+1*hsz);
b1c = DEVICE_LINEAR_GET(bias1, linearIndex%hsz+2*hsz);
b1o = DEVICE_LINEAR_GET(bias1, linearIndex%hsz+3*hsz);
b2i = DEVICE_LINEAR_GET(bias2, linearIndex%hsz+0*hsz);
b2f = DEVICE_LINEAR_GET(bias2, linearIndex%hsz+1*hsz);
b2c = DEVICE_LINEAR_GET(bias2, linearIndex%hsz+2*hsz);
b2o = DEVICE_LINEAR_GET(bias2, linearIndex%hsz+3*hsz);
}else{
#ifndef THC_REAL_IS_HALF
b1i = 0.0; b1f = 0.0; b1c = 0.0; b1o = 0.0;
b2i = 0.0; b2f = 0.0; b2c = 0.0; b2o = 0.0;
#else
b1i = F2H(0.0); b1f = F2H(0.0); b1c = F2H(0.0); b1o = F2H(0.0);
b2i = F2H(0.0); b2f = F2H(0.0); b2c = F2H(0.0); b2o = F2H(0.0);
#endif
}
accreal ig, fg, cg, og;
accreal f_hy, f_cy;
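// Standard LSTM cell: ig/fg/og are sigmoid gates, cg is the tanh candidate, then
//   cy = fg*cx + ig*cg   and   hy = og*tanh(cy).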
ig = H2F(*iig) + H2F(hig) + H2F(b1i) + H2F(b2i);
fg = H2F(*ifg) + H2F(hfg) + H2F(b1f) + H2F(b2f);
cg = H2F(*icg) + H2F(hcg) + H2F(b1c) + H2F(b2c);
og = H2F(*iog) + H2F(hog) + H2F(b1o) + H2F(b2o);
TensorSigmoidOp<accreal>()(&ig, &ig);
TensorSigmoidOp<accreal>()(&fg, &fg);
cg = THCNumerics<accreal>::tanh(cg);
TensorSigmoidOp<accreal>()(&og, &og);
f_cy = (fg * H2F(cx) ) + (ig * cg);
f_hy = og * THCNumerics<accreal>::tanh(f_cy);
*hy = F2H(f_hy);
*cy = F2H(f_cy);
//SAVE FOR BACKWARDS
//Also need cy and cx but can be saved easily in python
*iig = F2H(ig);
*ifg = F2H(fg);
*icg = F2H(cg);
*iog = F2H(og);
}
}
template <typename T, typename IndexType, int Dims>
#if __CUDA_ARCH__ >= 350
__launch_bounds__(32 * 16, 4)
#endif
__global__ void
THNN_(LSTMBackward)(TensorInfo<T, IndexType> storage,
TensorInfo<T, IndexType> gradInGates,
TensorInfo<T, IndexType> _cx,
TensorInfo<T, IndexType> _cy,
TensorInfo<T, IndexType> gradoutput,
TensorInfo<T, IndexType> gradoutputcell,
TensorInfo<T, IndexType> gradInputCx,
IndexType hsz,
IndexType totalElements)
{
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalElements;
linearIndex += gridDim.x * blockDim.x) {
IndexType offset = (linearIndex/hsz)*4*hsz+linearIndex%hsz;
T ig = DEVICE_LINEAR_GET(storage, offset+0*hsz);
T fg = DEVICE_LINEAR_GET(storage, offset+1*hsz);
T cg = DEVICE_LINEAR_GET(storage, offset+2*hsz);
T og = DEVICE_LINEAR_GET(storage, offset+3*hsz);
T* ih = &DEVICE_LINEAR_GET(gradInGates, offset+0*hsz);
T* fh = &DEVICE_LINEAR_GET(gradInGates, offset+1*hsz);
T* ch = &DEVICE_LINEAR_GET(gradInGates, offset+2*hsz);
T* oh = &DEVICE_LINEAR_GET(gradInGates, offset+3*hsz);
//will return hidden grads here
T cx = DEVICE_LINEAR_GET(_cx, linearIndex);
T cy = DEVICE_LINEAR_GET(_cy, linearIndex);
T* gi = &DEVICE_LINEAR_GET(gradInputCx, linearIndex);
T go = DEVICE_LINEAR_GET(gradoutput, linearIndex);
T goc= DEVICE_LINEAR_GET(gradoutputcell, linearIndex);
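// Backward pass of hy = og*tanh(cy) and cy = fg*cx + ig*cg: go is the incoming gradient
// w.r.t. hy and goc the gradient w.r.t. cy; the gate gradients below fold in the
// sigmoid/tanh derivatives and are returned through gradInGates.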
accreal gcx = THCNumerics<accreal>::tanh(H2F(cy));
accreal gog = H2F(go) * gcx;
gcx = H2F(go) * H2F(og) * ( 1 - gcx*gcx) + H2F(goc);
accreal gig = gcx * H2F(cg);
accreal gfg = gcx * H2F(cx);
accreal gcg = gcx * H2F(ig);
gcx = gcx * H2F(fg);
gig = gig * (1-H2F(ig)) * H2F(ig);
gfg = gfg * (1-H2F(fg)) * H2F(fg);
gcg = gcg * (1-H2F(cg)*H2F(cg));
gog = gog * (1-H2F(og)) * H2F(og);
*ih = F2H(gig);
*fh = F2H(gfg);
*ch = F2H(gcg);
*oh = F2H(gog);
*gi = F2H(gcx);
}
}
// ************ START Create function calls ********** //
#define FILL_FUNCTION(ITYPE, DIM, FUNCTION) FUNCTION(ITYPE, DIM)
#define FILL_DIM(ITYPE, DIM, FUNCTION) \
switch (DIM) { \
case -2: \
FILL_FUNCTION(ITYPE, -2, FUNCTION); \
break; \
case 1: \
FILL_FUNCTION(ITYPE, 1, FUNCTION); \
break; \
case 2: \
FILL_FUNCTION(ITYPE, 2, FUNCTION); \
break; \
default: \
FILL_FUNCTION(ITYPE, -1, FUNCTION); \
break; \
}
#define LSTM_FORWARD(ITYPE, DIM) THNN_(LSTMForward) \
<real, ITYPE, DIM> \
<<<grid, block, 0, THCState_getCurrentStreamOnDevice(state, curDevice)>>> \
(inputI, hiddenI, \
bias1I, bias2I, cxI, hyI, cyI, \
hid_size, totalElements);
#define LSTM_BACKWARD(ITYPE, DIM) THNN_(LSTMBackward) \
<real, ITYPE, DIM> \
<<<grid, block, 0, THCState_getCurrentStreamOnDevice(state, curDevice)>>> \
(storageI, gradingatesI, cxI, cyI, \
gradoutI, gradoutcI, gradincxI, \
hid_size, totalElements);
#define GRU_FORWARD(ITYPE, DIM) THNN_(GRUForward)<real, ITYPE, DIM> \
<<<grid, block, 0, THCState_getCurrentStreamOnDevice(state, curDevice)>>> \
(inputI, hiddenI, bias1I, bias2I, hxI, hyI, storageI, \
hid_size, totalElements);
#define GRU_BACKWARD(ITYPE, DIM) THNN_(GRUBackward) \
<real, ITYPE, DIM> \
<<<grid, block, 0, THCState_getCurrentStreamOnDevice(state, curDevice)>>> \
(gradininputI, gradinhiddenI, gradoutI, gradinhxI, storageI, \
hid_size, totalElements);
// ************ END Create actual function calls ************ //
template<typename INDTYPE>
void THNN_(LSTM_forw_ind_wrap)(
THCState *state,
THCTensor *input,
THCTensor *hidden,
THCTensor *bias1,
THCTensor *bias2,
THCTensor *cx,
THCTensor *hy,
THCTensor *cy)
{
bool has_bias = (bias1!=NULL);
int maxDim;
if(has_bias){
THCUNN_assertSameGPU(state, 7, input, hidden, bias1, bias2, hy, cy, cx);
maxDim = THNN_(minIndexType)
(state, 7, input, hidden, bias1, bias2, hy, cy, cx);
}else{
THCUNN_assertSameGPU(state, 5, input, hidden, hy, cy, cx);
maxDim = THNN_(minIndexType)
(state, 5, input, hidden, hy, cy, cx);
}
ptrdiff_t totalElements = TensorUtils<THCTensor>::getNumElements(state, cx);
const dim3 block = getApplyBlock();
dim3 grid;
int curDevice = -1;
cudaGetDevice(&curDevice);
THAssertMsg(getApplyGrid(state, totalElements, grid, curDevice),
"Could not get grid size for pointwise apply.");
TINFO inputI = getTensorInfo<real, THCTensor, INDTYPE>(state, input);
TINFO hiddenI = getTensorInfo<real, THCTensor, INDTYPE>(state, hidden);
TINFO cxI = getTensorInfo<real, THCTensor, INDTYPE>(state, cx);
TINFO hyI = getTensorInfo<real, THCTensor, INDTYPE>(state, hy);
TINFO cyI = getTensorInfo<real, THCTensor, INDTYPE>(state, cy);
INDTYPE hid_size = cxI.sizes[cxI.dims-1];
if(has_bias){
THAssertMsg( hid_size*4 == static_cast<INDTYPE>(THCTensor_(nElement)(state, bias1)) &&
hid_size*4 == static_cast<INDTYPE>(THCTensor_(nElement)(state, bias2)),
"Bias in pointwise operation is an incorrect size, must be 4 x feature size.");
}
if(maxDim == -2){
inputI.collapseDims();
hiddenI.collapseDims();
cxI.collapseDims();
hyI.collapseDims();
cyI.collapseDims();
}
INDTYPE zero[1] = {0};
TINFO nullinfo = TINFO(NULL, 1, zero, zero);
TINFO bias1I = nullinfo;
TINFO bias2I = nullinfo;
if(has_bias){
bias1I = getTensorInfo<real, THCTensor, INDTYPE>(state, bias1);
bias2I = getTensorInfo<real, THCTensor, INDTYPE>(state, bias2);
if(maxDim == -2){
bias1I.collapseDims();
bias2I.collapseDims();
}
}
FILL_DIM(INDTYPE, maxDim, LSTM_FORWARD);
}
void THNN_(LSTMFused_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *hidden,
THCTensor *bias1,
THCTensor *bias2,
THCTensor *cx,
THCTensor *hy,
THCTensor *cy)
{
THCTensor_(resizeAs)(state, hy, cx);
THCTensor_(resizeAs)(state, cy, cx);
THNN_(FusedRNNAssertSizes)(state, 4, 5, input, hidden, hy, cy, cx);
bool has_bias = (bias1!=NULL);
bool canUse32bi;
if(has_bias){
canUse32bi = THNN_(canUse32BitIndexMath)
(state, 7, input, hidden, bias1, bias2, hy, cy, cx);
}else{
canUse32bi = THNN_(canUse32BitIndexMath)
(state, 5, input, hidden, hy, cy, cx);
}
if(canUse32bi){
THNN_(LSTM_forw_ind_wrap)<uint32_t>
(state, input, hidden, bias1, bias2, cx, hy, cy);
}else{
THNN_(LSTM_forw_ind_wrap)<uint64_t>
(state, input, hidden, bias1, bias2, cx, hy, cy);
}
THCudaCheck(cudaGetLastError());
}
template<typename INDTYPE>
void THNN_(LSTM_back_ind_wrap)(
THCState *state,
THCTensor *storage,
THCTensor *gradInGates,
THCTensor *cx,
THCTensor *cy,
THCTensor *gradOutput,
THCTensor *gradOutputCell,
THCTensor *gradInputCx)
{
int maxDim = THNN_(minIndexType)
(state, 7, storage, gradInGates, cx, cy,
gradOutput, gradOutputCell, gradInputCx);
ptrdiff_t totalElements = TensorUtils<THCTensor>::getNumElements(state, gradOutput);
const dim3 block = getApplyBlock();
dim3 grid;
int curDevice = -1;
cudaGetDevice(&curDevice);
THAssertMsg(getApplyGrid(state, totalElements, grid, curDevice),
"Could not get grid size for pointwise apply");
TINFO storageI = getTensorInfo<real, THCTensor, INDTYPE>(state, storage);
TINFO gradingatesI = getTensorInfo<real, THCTensor, INDTYPE>(state, gradInGates);
TINFO cxI = getTensorInfo<real, THCTensor, INDTYPE>(state, cx);
TINFO cyI = getTensorInfo<real, THCTensor, INDTYPE>(state, cy);
TINFO gradoutI = getTensorInfo<real, THCTensor, INDTYPE>(state, gradOutput);
TINFO gradoutcI = getTensorInfo<real, THCTensor, INDTYPE>(state, gradOutputCell);
TINFO gradincxI = getTensorInfo<real, THCTensor, INDTYPE>(state, gradInputCx);
INDTYPE hid_size = gradoutI.sizes[gradoutI.dims-1];
if(maxDim == -2){
storageI.collapseDims();
gradingatesI.collapseDims();
cxI.collapseDims();
cyI.collapseDims();
gradoutI.collapseDims();
gradoutcI.collapseDims();
gradincxI.collapseDims();
}
FILL_DIM(INDTYPE, maxDim, LSTM_BACKWARD);
}
void THNN_(LSTMFused_updateGradInput)(
THCState *state,
THCTensor *storage,
THCTensor *gradInGates,
THCTensor *cx,
THCTensor *cy,
THCTensor *gradOutput,
THCTensor *gradOutputCell,
THCTensor *gradInputCx)
{
THCTensor_(resizeAs)(state, gradInputCx, gradOutput);
THCUNN_assertSameGPU(state, 7, storage, gradInGates, cx, cy,
gradOutput, gradOutputCell, gradInputCx);
THNN_(FusedRNNAssertSizes)
(state, 4, 7, storage, gradInGates, cx, cy,
gradOutput, gradOutputCell, gradInputCx);
bool canUse32bi = THNN_(canUse32BitIndexMath)
(state, 7, storage, gradInGates, cx, cy,
gradOutput, gradOutputCell, gradInputCx);
if(canUse32bi){
THNN_(LSTM_back_ind_wrap)<uint32_t>
(state, storage, gradInGates, cx, cy,
gradOutput, gradOutputCell, gradInputCx);
}else{
THNN_(LSTM_back_ind_wrap)<uint64_t>
(state, storage, gradInGates, cx, cy,
gradOutput, gradOutputCell, gradInputCx);
}
THCudaCheck(cudaGetLastError());
}
template<typename INDTYPE>
void THNN_(GRU_forw_ind_wrap)(
THCState *state,
THCTensor *input,
THCTensor *hidden,
THCTensor *bias1,
THCTensor *bias2,
THCTensor *hx,
THCTensor *hy,
THCTensor *storage)
{
bool has_bias = (bias1!=NULL);
int maxDim;
if(has_bias){
THCUNN_assertSameGPU
(state, 7, input, hidden, hx, hy, bias1, bias2, storage);
maxDim = THNN_(minIndexType)
(state, 7, input, hidden, hx, hy, bias1, bias2, storage);
}else{
THCUNN_assertSameGPU
(state, 5, input, hidden, hx, hy, storage);
maxDim = THNN_(minIndexType)
(state, 5, input, hidden, hx, hy, storage);
}
ptrdiff_t totalElements = TensorUtils<THCTensor>::getNumElements(state, hx);
const dim3 block = getApplyBlock();
dim3 grid;
int curDevice = -1;
cudaGetDevice(&curDevice);
THAssertMsg(getApplyGrid(state, totalElements, grid, curDevice),
"Could not get grid size for pointwise apply.");
TINFO inputI = getTensorInfo<real, THCTensor, INDTYPE>(state, input);
TINFO hiddenI = getTensorInfo<real, THCTensor, INDTYPE>(state, hidden);
TINFO hxI = getTensorInfo<real, THCTensor, INDTYPE>(state, hx);
TINFO hyI = getTensorInfo<real, THCTensor, INDTYPE>(state, hy);
TINFO storageI = getTensorInfo<real, THCTensor, INDTYPE>(state, storage);
INDTYPE hid_size = hxI.sizes[hxI.dims-1];
if(has_bias){
THAssertMsg( hid_size*3 == static_cast<INDTYPE>(THCTensor_(nElement)(state, bias1)) &&
hid_size*3 == static_cast<INDTYPE>(THCTensor_(nElement)(state, bias2)),
"Bias in pointwise operation is an incorrect size, must be 3 x feature size.");
}
if(maxDim == -2){
inputI.collapseDims();
hiddenI.collapseDims();
hyI.collapseDims();
hxI.collapseDims();
storageI.collapseDims();
}
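  // When no bias is supplied, hand the kernel a dummy 1-element TensorInfo so the
  // FILL_DIM dispatch below keeps a single signature for both cases.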
INDTYPE zero[1] = {0};
TINFO nullinfo = TINFO(NULL, 1, zero, zero);
TINFO bias1I = nullinfo;
TINFO bias2I = nullinfo;
if(has_bias){
bias1I = getTensorInfo<real, THCTensor, INDTYPE>(state, bias1);
bias2I = getTensorInfo<real, THCTensor, INDTYPE>(state, bias2);
if(maxDim == -2){
bias1I.collapseDims();
bias2I.collapseDims();
}
}
FILL_DIM(INDTYPE, maxDim, GRU_FORWARD);
}
void THNN_(GRUFused_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *hidden,
THCTensor *bias1,
THCTensor *bias2,
THCTensor *hx,
THCTensor *hy,
THCTensor *storage)
{
THCTensor_(resizeAs)(state, hy, hx);
THNN_(FusedRNNAssertSizes)(state, 3, 4, input, hidden, hx, hy);
THArgCheck(THCTensor_(nElement)(state, storage) ==
THCTensor_(nElement)(state, hx)*5,
3, "Storage tensor for fused kernel was not sized correctly.");
bool has_bias = (bias1!=NULL);
bool canUse32bi;
if(has_bias){
canUse32bi = THNN_(canUse32BitIndexMath)
(state, 7, input, hidden, hx, hy, bias1, bias2, storage);
}else{
canUse32bi = THNN_(canUse32BitIndexMath)
(state, 5, input, hidden, hx, hy, storage);
}
if(canUse32bi){
THNN_(GRU_forw_ind_wrap)<uint32_t>
(state, input, hidden, bias1, bias2, hx, hy, storage);
}else{
THNN_(GRU_forw_ind_wrap)<uint64_t>
(state, input, hidden, bias1, bias2, hx, hy, storage);
}
THCudaCheck(cudaGetLastError());
}
template<typename INDTYPE>
void THNN_(GRU_back_ind_wrap)(
THCState *state,
THCTensor *gradInInput,
THCTensor *gradInHidden,
THCTensor *gradOutput,
THCTensor *gradInputHx,
THCTensor *storage)
{
int maxDim = THNN_(minIndexType)(state, 5, gradInInput, gradInHidden, gradOutput,
gradInputHx, storage);
ptrdiff_t totalElements = TensorUtils<THCTensor>::getNumElements(state, gradOutput);
const dim3 block = getApplyBlock();
dim3 grid;
int curDevice = -1;
cudaGetDevice(&curDevice);
THAssertMsg(getApplyGrid(state, totalElements, grid, curDevice),
"Could not get grid size for pointwise apply");
TINFO gradininputI = getTensorInfo<real, THCTensor, INDTYPE>(state, gradInInput);
TINFO gradinhiddenI = getTensorInfo<real, THCTensor, INDTYPE>(state, gradInHidden);
TINFO gradoutI = getTensorInfo<real, THCTensor, INDTYPE>(state, gradOutput);
TINFO gradinhxI = getTensorInfo<real, THCTensor, INDTYPE>(state, gradInputHx);
TINFO storageI = getTensorInfo<real, THCTensor, INDTYPE>(state, storage);
INDTYPE hid_size = gradoutI.sizes[gradoutI.dims-1];
if(maxDim == -2){
gradininputI.collapseDims();
gradinhiddenI.collapseDims();
gradoutI.collapseDims();
gradinhxI.collapseDims();
storageI.collapseDims();
}
FILL_DIM(INDTYPE, maxDim, GRU_BACKWARD);
}
void THNN_(GRUFused_updateGradInput)(
THCState *state,
THCTensor *gradInInput,
THCTensor *gradInHidden,
THCTensor *gradOutput,
THCTensor *gradInputHx,
THCTensor *storage)
{
THCTensor_(resizeAs)(state, gradInputHx, gradOutput);
THCUNN_assertSameGPU(state, 5, gradInInput, gradInHidden, gradOutput, gradInputHx, storage);
THNN_(FusedRNNAssertSizes)(state, 3, 4, gradInInput, gradInHidden, gradOutput, gradInputHx);
bool canUse32bi = THNN_(canUse32BitIndexMath)(state, 5, gradInInput, gradInHidden,
gradOutput, gradInputHx, storage);
if(canUse32bi){
THNN_(GRU_back_ind_wrap)<uint32_t>
(state, gradInInput, gradInHidden, gradOutput, gradInputHx, storage);
}else{
THNN_(GRU_back_ind_wrap)<uint64_t>
(state, gradInInput, gradInHidden, gradOutput, gradInputHx, storage);
}
THCudaCheck(cudaGetLastError());
}
//Clean up compiler namespace
#undef DEVICE_LINEAR_GET
#undef H2F
#undef F2H
#undef EXPAND_FUNCTION
#undef EXPAND_DIM
#undef EXPAND_TYPE
#undef FILL_TYPES_FORWARD
#undef FILL_FORWARD
#undef FILL_TYPES_BACKWARD
#undef FILL_BACKWARD
#endif
|
2dc12af8b3feb5ce9549b6028d68645b6d5700ab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* gpuMerge.cu
*
* Created on: Dec 16, 2018
* Author: Orai Dezso Gergely
*/
#include "gpuMerge.cuh"
#include <iostream>
#include <stdio.h>
static void CheckCudaErrorAux (const char *, unsigned, const char *, hipError_t);
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
#define MIN_RUNTIME_VERSION 1000
#define MIN_COMPUTE_VERSION 0x10
int MaxThread = 512;
int BlockNum=2,CoreInBlock=128;
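// Merge two adjacent sorted runs (starting at l and r, each at most `length` elements,
// both clamped at `end`) into the destination buffer `to`.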
void cmerge(float *l, float *r, float *to, float *end, int length){
//int length = r - l;
float *lend=(r<end) ? r : end;
float *rend=(r+length <end) ? r+length : end;
while(true){
if(l==lend){
while(r<rend){
*to++=*r++;
}
break;
}
if(r>=rend){
while(l<lend){
*to++=*l++;
}
break;
}
*to++ = (*l < *r) ? *l++ : *r++;
}
}
void cpuMergeSort(float *data, unsigned int size, int length=1)
{
float *tmp = new float[size];
float *akt = data;
float *next = tmp;
for (; length < size; length *= 2){
float *end=akt+size;
for(unsigned col = 0; col< size; col+=2*length){
cmerge(akt + col, akt + col + length, next + col, end, length);
}
float *c = akt;
akt=next;
next=c;
}
if(akt!=data)for(unsigned i=0;i<size;++i)data[i]=akt[i];
delete[] tmp;
}
/**
 * CUDA device function that merges two sorted float runs
*/
__device__ void kernelMerge(float *l, float *r, float *to, float *end, int length){
float *lend=(r<end) ? r : end;
float *rend=(r+length <end) ? r+length : end;
while(true){
if(l==lend){
while(r<rend){
*to++=*r++;
}
break;
}
if(r>=rend){
while(l<lend){
*to++=*l++;
}
break;
}
*to++ = (*l < *r) ? *l++ : *r++;
}
}
/**
* CUDA kernel that sorts a float array
*/
__global__ void gpuKernelMergeSort(float *data, float *tmpIn, unsigned int fullSize, unsigned int size, unsigned int length=1)
{
unsigned idx = blockIdx.x*blockDim.x+threadIdx.x;
float *tmp = tmpIn + (idx * size);
	float *base = data + (idx * size);
	float *akt = base;
	if (akt >= data + fullSize) return; // this thread's chunk starts past the end of the array
	// The last chunk may be shorter than the others, so clamp its size
	if (akt + size > data + fullSize) size = (data + fullSize) - akt;
float *next = tmp;
for (; length < size; length *= 2){
float *end=akt+size;
for(unsigned col = 0; col< size; col+=2*length){
kernelMerge(akt + col, akt + col + length, next + col, end, length);
}
float *c = akt;
akt=next;
next=c;
}
	if(akt != base)for(unsigned i=0;i<size;++i)base[i]=akt[i];
}
/**
* Host function that copies the data and launches the work on GPU
*/
void gpuMergeSort(float *data, unsigned size)
{
if(size < CoreInBlock*BlockNum*4){
cpuMergeSort(data,size);
return;
}
float *gpuData;
CUDA_CHECK_RETURN(hipMalloc((void **)&gpuData, sizeof(float)*size));
CUDA_CHECK_RETURN(hipMemcpy(gpuData, data, sizeof(float)*size, hipMemcpyHostToDevice));
float *tmp;
CUDA_CHECK_RETURN(hipMalloc((void **)&tmp, sizeof(float)*size));
int arraySizeInBlock = CoreInBlock*BlockNum;
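	// Phase 1: each GPU thread bottom-up merge sorts its own chunk of arraySizeInBlock elements.
	// Phase 2: a single-thread launch merges the pre-sorted chunks (run length = arraySizeInBlock).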
hipLaunchKernelGGL(( gpuKernelMergeSort), dim3(BlockNum),dim3(CoreInBlock), 0, 0, gpuData, tmp, size, arraySizeInBlock);
hipLaunchKernelGGL(( gpuKernelMergeSort), dim3(1),dim3(1), 0, 0, gpuData, tmp, size, size, arraySizeInBlock);
CUDA_CHECK_RETURN(hipMemcpy(data, gpuData, sizeof(float)*size, hipMemcpyDeviceToHost));
//cpuMergeSort(data,size,arraySizeInBlock);
CUDA_CHECK_RETURN(hipFree(gpuData));
CUDA_CHECK_RETURN(hipFree(tmp));
}
/**
* Check the return value of the CUDA runtime API call and exit
* the application if the call has failed.
*/
static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, hipError_t err)
{
if (err == hipSuccess)
return;
std::cerr << statement<<" returned " << hipGetErrorString(err) << "("<<err<< ") at "<<file<<":"<<line << std::endl;
exit (1);
}
/**
 * Get the number of CUDA cores per SM (used to size the launch configuration)
*/
int getSPcores(hipDeviceProp_t devProp)
{
int cores = 0;
switch (devProp.major){
case 2: // Fermi
if (devProp.minor == 1) cores = 48;
else cores = 32;
break;
case 3: // Kepler
cores = 192;
break;
case 5: // Maxwell
cores = 128;
break;
case 6: // Pascal
if (devProp.minor == 1) cores = 128;
else if (devProp.minor == 0) cores = 64;
else printf("Unknown device type\n");
break;
case 7: // Volta
if (devProp.minor == 0) cores = 64;
else printf("Unknown device type\n");
break;
default:
printf("Unknown device type\n");
break;
}
return cores;
}
bool findCudaDevice(){
int deviceCount, bestDev=-1;
CUDA_CHECK_RETURN(hipGetDeviceCount(&deviceCount));
for (int dev = 0; dev < deviceCount; ++dev)
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
int runtimeVersion = 0;
hipRuntimeGetVersion(&runtimeVersion);
if (runtimeVersion >= MIN_RUNTIME_VERSION && ((deviceProp.major<<4) + deviceProp.minor) >= MIN_COMPUTE_VERSION)
{
if (bestDev == -1)
{
bestDev = dev;
MaxThread = deviceProp.maxThreadsPerBlock;
BlockNum=deviceProp.multiProcessorCount;
CoreInBlock=getSPcores(deviceProp);
if(CoreInBlock==0)return false;
}
}
}
if(bestDev != -1)hipSetDevice(bestDev);
return bestDev != -1;
}
| 2dc12af8b3feb5ce9549b6028d68645b6d5700ab.cu | /*
* gpuMerge.cu
*
* Created on: Dec 16, 2018
* Author: Orai Dezso Gergely
*/
#include "gpuMerge.cuh"
#include <iostream>
#include <stdio.h>
static void CheckCudaErrorAux (const char *, unsigned, const char *, cudaError_t);
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
#define MIN_RUNTIME_VERSION 1000
#define MIN_COMPUTE_VERSION 0x10
int MaxThread = 512;
int BlockNum=2,CoreInBlock=128;
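// Merge two adjacent sorted runs (starting at l and r, each at most `length` elements,
// both clamped at `end`) into the destination buffer `to`.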
void cmerge(float *l, float *r, float *to, float *end, int length){
//int length = r - l;
float *lend=(r<end) ? r : end;
float *rend=(r+length <end) ? r+length : end;
while(true){
if(l==lend){
while(r<rend){
*to++=*r++;
}
break;
}
if(r>=rend){
while(l<lend){
*to++=*l++;
}
break;
}
*to++ = (*l < *r) ? *l++ : *r++;
}
}
void cpuMergeSort(float *data, unsigned int size, int length=1)
{
float *tmp = new float[size];
float *akt = data;
float *next = tmp;
for (; length < size; length *= 2){
float *end=akt+size;
for(unsigned col = 0; col< size; col+=2*length){
cmerge(akt + col, akt + col + length, next + col, end, length);
}
float *c = akt;
akt=next;
next=c;
}
if(akt!=data)for(unsigned i=0;i<size;++i)data[i]=akt[i];
delete[] tmp;
}
/**
 * CUDA device function that merges two sorted float runs
*/
__device__ void kernelMerge(float *l, float *r, float *to, float *end, int length){
float *lend=(r<end) ? r : end;
float *rend=(r+length <end) ? r+length : end;
while(true){
if(l==lend){
while(r<rend){
*to++=*r++;
}
break;
}
if(r>=rend){
while(l<lend){
*to++=*l++;
}
break;
}
*to++ = (*l < *r) ? *l++ : *r++;
}
}
/**
* CUDA kernel that sorts a float array
*/
__global__ void gpuKernelMergeSort(float *data, float *tmpIn, unsigned int fullSize, unsigned int size, unsigned int length=1)
{
unsigned idx = blockIdx.x*blockDim.x+threadIdx.x;
float *tmp = tmpIn + (idx * size);
	float *base = data + (idx * size);
	float *akt = base;
	if (akt >= data + fullSize) return; // this thread's chunk starts past the end of the array
	// The last chunk may be shorter than the others, so clamp its size
	if (akt + size > data + fullSize) size = (data + fullSize) - akt;
float *next = tmp;
for (; length < size; length *= 2){
float *end=akt+size;
for(unsigned col = 0; col< size; col+=2*length){
kernelMerge(akt + col, akt + col + length, next + col, end, length);
}
float *c = akt;
akt=next;
next=c;
}
	if(akt != base)for(unsigned i=0;i<size;++i)base[i]=akt[i];
}
/**
* Host function that copies the data and launches the work on GPU
*/
void gpuMergeSort(float *data, unsigned size)
{
if(size < CoreInBlock*BlockNum*4){
cpuMergeSort(data,size);
return;
}
float *gpuData;
CUDA_CHECK_RETURN(cudaMalloc((void **)&gpuData, sizeof(float)*size));
CUDA_CHECK_RETURN(cudaMemcpy(gpuData, data, sizeof(float)*size, cudaMemcpyHostToDevice));
float *tmp;
CUDA_CHECK_RETURN(cudaMalloc((void **)&tmp, sizeof(float)*size));
int arraySizeInBlock = CoreInBlock*BlockNum;
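	// Phase 1: each GPU thread bottom-up merge sorts its own chunk of arraySizeInBlock elements.
	// Phase 2: a single-thread launch merges the pre-sorted chunks (run length = arraySizeInBlock).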
gpuKernelMergeSort<<<BlockNum,CoreInBlock>>>(gpuData, tmp, size, arraySizeInBlock);
gpuKernelMergeSort<<<1,1>>>(gpuData, tmp, size, size, arraySizeInBlock);
CUDA_CHECK_RETURN(cudaMemcpy(data, gpuData, sizeof(float)*size, cudaMemcpyDeviceToHost));
//cpuMergeSort(data,size,arraySizeInBlock);
CUDA_CHECK_RETURN(cudaFree(gpuData));
CUDA_CHECK_RETURN(cudaFree(tmp));
}
/**
* Check the return value of the CUDA runtime API call and exit
* the application if the call has failed.
*/
static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, cudaError_t err)
{
if (err == cudaSuccess)
return;
std::cerr << statement<<" returned " << cudaGetErrorString(err) << "("<<err<< ") at "<<file<<":"<<line << std::endl;
exit (1);
}
/**
 * Get the number of CUDA cores per SM (used to size the launch configuration)
*/
int getSPcores(cudaDeviceProp devProp)
{
int cores = 0;
switch (devProp.major){
case 2: // Fermi
if (devProp.minor == 1) cores = 48;
else cores = 32;
break;
case 3: // Kepler
cores = 192;
break;
case 5: // Maxwell
cores = 128;
break;
case 6: // Pascal
if (devProp.minor == 1) cores = 128;
else if (devProp.minor == 0) cores = 64;
else printf("Unknown device type\n");
break;
case 7: // Volta
if (devProp.minor == 0) cores = 64;
else printf("Unknown device type\n");
break;
default:
printf("Unknown device type\n");
break;
}
return cores;
}
bool findCudaDevice(){
int deviceCount, bestDev=-1;
CUDA_CHECK_RETURN(cudaGetDeviceCount(&deviceCount));
for (int dev = 0; dev < deviceCount; ++dev)
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
int runtimeVersion = 0;
cudaRuntimeGetVersion(&runtimeVersion);
if (runtimeVersion >= MIN_RUNTIME_VERSION && ((deviceProp.major<<4) + deviceProp.minor) >= MIN_COMPUTE_VERSION)
{
if (bestDev == -1)
{
bestDev = dev;
MaxThread = deviceProp.maxThreadsPerBlock;
BlockNum=deviceProp.multiProcessorCount;
CoreInBlock=getSPcores(deviceProp);
if(CoreInBlock==0)return false;
}
}
}
if(bestDev != -1)cudaSetDevice(bestDev);
return bestDev != -1;
}
|
8dc959d169b63044e84c6981e5698f86bd9bb83d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"{
#include "rt_host.h"
}
#include "rt_device.cuh"
__global__ void intersect_ray_ellipsoid_c(float3 *ray_arr, float3 camera_start, float3 e_center, float a, float b, float c, float *depth_buf, int *index_buf, int index)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
float k1;
float k2;
float k3;
float3 dir;
float a2;
float b2;
float c2;
a2 = a * a;
b2 = b * b;
c2 = c * c;
dir = ray_arr[i];
k1 = dir.x * dir.x * b2 * c2;
k1 += dir.y * dir.y * a2 * c2;
k1 += dir.z * dir.z * a2 * b2;
k2 = camera_start.x * dir.x * b2 * c2 * 2;
k2 += camera_start.y * dir.y * a2 * c2 * 2;
k2 += camera_start.z * dir.z * b2 * a2 * 2;
k3 = camera_start.x * camera_start.x * b2 * c2;
k3 += camera_start.z * camera_start.z * a2 * b2;
k3 += camera_start.y * camera_start.y * a2 * c2;
k3 -= a2 * b2 * c2;
float d = k2 * k2 - 4 * k1 * k3;
if (d >= 0)
{
float t1 = (-k2 + sqrt(d)) / (2 * k1);
float t2 = (-k2 - sqrt(d)) / (2 * k1);
float result = 0;
if ((t1 < t2 && t1 > 0) || (t2 < 0 && t1 >= 0))
result = t1;
if ((t2 < t1 && t2 > 0) || (t1 < 0 && t2 >= 0))
result = t2;
if (t2 == t1 && t2 >= 0)
result = t2;
if (result > 0 && result < depth_buf[i])
{
depth_buf[i] = result;
index_buf[i] = index;
}
}
}
__global__ void intersect_ray_hyperboloid_c(float3 *ray_arr, float3 camera_start, float3 e_center, float a, float b, float c, float *depth_buf, int *index_buf, int index)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
float k1;
float k2;
float k3;
float3 dir;
float a2;
float b2;
float c2;
a2 = a * a;
b2 = b * b;
c2 = c * c;
dir = ray_arr[i];
k1 = dir.x * dir.x * b2 * c2;
k1 += dir.y * dir.y * a2 * c2;
k1 -= dir.z * dir.z * a2 * b2;
k2 = camera_start.x * dir.x * b2 * c2 * 2;
k2 += camera_start.y * dir.y * a2 * c2 * 2;
k2 -= camera_start.z * dir.z * b2 * a2 * 2;
k3 = camera_start.x * camera_start.x * b2 * c2;
k3 -= camera_start.z * camera_start.z * a2 * b2;
k3 += camera_start.y * camera_start.y * a2 * c2;
k3 -= a2 * b2 * c2;
float d = k2 * k2 - 4 * k1 * k3;
if (d >= 0)
{
float t1 = (-k2 + sqrt(d)) / (2 * k1);
float t2 = (-k2 - sqrt(d)) / (2 * k1);
float result = 0;
if ((t1 < t2 && t1 > 0) || (t2 < 0 && t1 >= 0))
result = t1;
if ((t2 < t1 && t2 > 0) || (t1 < 0 && t2 >= 0))
result = t2;
if (t2 == t1 && t2 >= 0)
result = t2;
if (result > 0 && result < depth_buf[i])
{
depth_buf[i] = result;
index_buf[i] = index;
}
}
}
__global__ void intersect_ray_paraboloid_c(float3 *ray_arr, float3 camera_start, float3 e_center,float p, float q, float *depth_buf, int *index_buf, int index)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
float k1;
float k2;
float k3;
float3 dir;
dir = ray_arr[i];
k1 = dir.x * dir.x + dir.y * dir.y;
k2 = dir.x * camera_start.x + camera_start.y * dir.y - 2 * dir.z * p * q;
k3 = q * camera_start.x * camera_start.x + p * camera_start.y * camera_start.y - 2 * camera_start.z * p * q;
float d = k2 * k2 - 4 * k1 * k3;
if (d >= 0)
{
float t1 = (-k2 + sqrt(d)) / (2 * k1);
float t2 = (-k2 - sqrt(d)) / (2 * k1);
float result = 0;
if ((t1 < t2 && t1 > 0) || (t2 < 0 && t1 >= 0))
result = t1;
if ((t2 < t1 && t2 > 0) || (t1 < 0 && t2 >= 0))
result = t2;
if (t2 == t1 && t2 >= 0)
result = t2;
if (result > 0 && result < depth_buf[i])
{
depth_buf[i] = result;
index_buf[i] = index;
}
}
}
__global__ void intersect_ray_sphere_c(float3 *ray_arr, \
float3 camera_start, \
float3 s_center, \
float s_radius, \
float *depth_buf, int *index_buf, int index)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
float a = dot(ray_arr[i], ray_arr[i]);
float b;
float c;
float t1;
float t2;
float3 dist = sub(camera_start, s_center);
b = 2 * dot(dist, ray_arr[i]);
c = dot(dist, dist) - (s_radius * s_radius);
c = b * b - 4 * a * c;
if (c >= 0)
{
c = sqrt(c);
t1 = (-b + c) / (2 * a);
t2 = (-b - c) / (2 * a);
float result;
result = 0;
if ((t1 < t2 && t1 > 0) || (t2 < 0 && t1 >= 0))
result = t1;
if ((t2 < t1 && t2 > 0) || (t1 < 0 && t2 >= 0))
result = t2;
if (t2 == t1 && t2 >= 0)
result = t2;
if (result > 0 && result < depth_buf[i])
{
depth_buf[i] = result;
index_buf[i] = index;
}
}
}
__global__ void intersect_ray_triangle_c(float3 *ray_arr, float3 camera_start,\
float *depth_buf, float3 *vertex, int *index_buf, int index)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
float3 edge[2];
float3 vec[3];
float det;
float uv[2];
edge[0] = sub(vertex[1],vertex[0]);
edge[1] = sub(vertex[2],vertex[0]);
vec[0] = cross(ray_arr[i], edge[1]);
det = dot(edge[0], vec[0]);
if (det < 1e-8 && det > -1e-8)
return ;
det = 1 / det;
vec[1] = sub(camera_start,vertex[0]);
uv[0] = dot(vec[1], vec[0]) * det;
if (uv[0] < 0 || uv[0] > 1)
return ;
vec[2] = cross(vec[1], edge[0]);
uv[1] = dot(ray_arr[i], vec[2]) * det;
if (uv[1] < 0 || uv[0] + uv[1] > 1)
return ;
float res;
res = dot(edge[1], vec[2]) * det;
if (res > 0 && res < depth_buf[i])
{
depth_buf[i] = res;
index_buf[i] = index;
return ;
}
}
__global__ void intersect_ray_cone_c(float3 *ray_arr, float3 camera_start, float3 position, \
float3 vector, float angle, float *depth_buf, int *index_buf, int index)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
float t1;
float t2;
float b;
float c;
float ang = angle;
float3 vec = vector;
float3 dist = sub(camera_start, position);
float a = dot(ray_arr[i], vec);
a = dot(ray_arr[i], ray_arr[i]) - (1 + ang * ang) * a * a;
b = 2 * (dot(ray_arr[i], dist) - (1 + ang * ang) * \
dot(ray_arr[i], vec) * dot(dist, vec));
c = dot(dist, vec);
c = dot(dist, dist) - (1 + ang * ang) * c * c;
c = b * b - 4 * a * c;
if (c >= 0)
{
c = sqrt(c);
t1 = (-b + c) / (2 * a);
t2 = (-b - c) / (2 * a);
float result;
result = 0;
if ((t1 < t2 && t1 > 0) || (t2 < 0 && t1 >= 0))
result = t1;
if ((t2 < t1 && t2 > 0) || (t1 < 0 && t2 >= 0))
result = t2;
if (t2 == t1 && t2 >= 0)
result = t2;
if (result > 0 && result < depth_buf[i])
{
depth_buf[i] = result;
index_buf[i] = index;
}
}
}
__global__ void intersect_ray_plane_c(float3 *ray_arr, \
float3 camera_start, \
float *depth_buf, \
float3 normal, \
float d, \
int *index_buf, \
int index)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
float k1;
float k2;
if ((dot(ray_arr[i], normal)) == 0)
return ;
k1 = dot(camera_start, normal) + d;
k2 = dot(ray_arr[i], normal);
if (k1 == 0 || (k1 < 0 && k2 < 0) || (k1 > 0 && k2 > 0))
return ;
k1 = -k1 / k2;
if(k1 < depth_buf[i] && k1 > 0)
{
			depth_buf[i] = k1; // k1 was already set to the ray parameter t above
index_buf[i] = index;
}
}
__global__ void intersect_ray_cylinder_c(float3 *ray_arr, float3 camera_start, \
float3 position, float *depth_buf, float3 vector, \
float radius, int *index_buf, int index)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
float t1;
float t2;
float b;
float c;
float3 dist = sub(camera_start, position);
float a = dot(ray_arr[i], vector);
a = dot(ray_arr[i], ray_arr[i]) - a * a;
b = 2 * (dot(ray_arr[i], dist) - dot(ray_arr[i], vector) * \
dot(dist, vector));
c = dot(dist, vector);
c = dot(dist, dist) - c * c - radius * radius;
c = b * b - 4 * a * c;
if (c >= 0)
{
c = sqrt(c);
t1 = (-b + c) / (2 * a);
t2 = (-b - c) / (2 * a);
float result;
result = 0;
if ((t1 < t2 && t1 > 0) || (t2 < 0 && t1 >= 0))
result = t1;
if ((t2 < t1 && t2 > 0) || (t1 < 0 && t2 >= 0))
result = t2;
if (t2 == t1 && t2 >= 0)
result = t2;
if (result > 0 && result < depth_buf[i])
{
depth_buf[i] = result;
index_buf[i] = index;
}
}
} | 8dc959d169b63044e84c6981e5698f86bd9bb83d.cu | extern "C"{
#include "rt_host.h"
}
#include "rt_device.cuh"
__global__ void intersect_ray_ellipsoid_c(float3 *ray_arr, float3 camera_start, float3 e_center, float a, float b, float c, float *depth_buf, int *index_buf, int index)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
float k1;
float k2;
float k3;
float3 dir;
float a2;
float b2;
float c2;
a2 = a * a;
b2 = b * b;
c2 = c * c;
dir = ray_arr[i];
k1 = dir.x * dir.x * b2 * c2;
k1 += dir.y * dir.y * a2 * c2;
k1 += dir.z * dir.z * a2 * b2;
k2 = camera_start.x * dir.x * b2 * c2 * 2;
k2 += camera_start.y * dir.y * a2 * c2 * 2;
k2 += camera_start.z * dir.z * b2 * a2 * 2;
k3 = camera_start.x * camera_start.x * b2 * c2;
k3 += camera_start.z * camera_start.z * a2 * b2;
k3 += camera_start.y * camera_start.y * a2 * c2;
k3 -= a2 * b2 * c2;
float d = k2 * k2 - 4 * k1 * k3;
if (d >= 0)
{
float t1 = (-k2 + sqrt(d)) / (2 * k1);
float t2 = (-k2 - sqrt(d)) / (2 * k1);
float result = 0;
if ((t1 < t2 && t1 > 0) || (t2 < 0 && t1 >= 0))
result = t1;
if ((t2 < t1 && t2 > 0) || (t1 < 0 && t2 >= 0))
result = t2;
if (t2 == t1 && t2 >= 0)
result = t2;
if (result > 0 && result < depth_buf[i])
{
depth_buf[i] = result;
index_buf[i] = index;
}
}
}
__global__ void intersect_ray_hyperboloid_c(float3 *ray_arr, float3 camera_start, float3 e_center, float a, float b, float c, float *depth_buf, int *index_buf, int index)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
float k1;
float k2;
float k3;
float3 dir;
float a2;
float b2;
float c2;
a2 = a * a;
b2 = b * b;
c2 = c * c;
dir = ray_arr[i];
k1 = dir.x * dir.x * b2 * c2;
k1 += dir.y * dir.y * a2 * c2;
k1 -= dir.z * dir.z * a2 * b2;
k2 = camera_start.x * dir.x * b2 * c2 * 2;
k2 += camera_start.y * dir.y * a2 * c2 * 2;
k2 -= camera_start.z * dir.z * b2 * a2 * 2;
k3 = camera_start.x * camera_start.x * b2 * c2;
k3 -= camera_start.z * camera_start.z * a2 * b2;
k3 += camera_start.y * camera_start.y * a2 * c2;
k3 -= a2 * b2 * c2;
float d = k2 * k2 - 4 * k1 * k3;
if (d >= 0)
{
float t1 = (-k2 + sqrt(d)) / (2 * k1);
float t2 = (-k2 - sqrt(d)) / (2 * k1);
float result = 0;
if ((t1 < t2 && t1 > 0) || (t2 < 0 && t1 >= 0))
result = t1;
if ((t2 < t1 && t2 > 0) || (t1 < 0 && t2 >= 0))
result = t2;
if (t2 == t1 && t2 >= 0)
result = t2;
if (result > 0 && result < depth_buf[i])
{
depth_buf[i] = result;
index_buf[i] = index;
}
}
}
__global__ void intersect_ray_paraboloid_c(float3 *ray_arr, float3 camera_start, float3 e_center,float p, float q, float *depth_buf, int *index_buf, int index)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
float k1;
float k2;
float k3;
float3 dir;
dir = ray_arr[i];
k1 = dir.x * dir.x + dir.y * dir.y;
k2 = dir.x * camera_start.x + camera_start.y * dir.y - 2 * dir.z * p * q;
k3 = q * camera_start.x * camera_start.x + p * camera_start.y * camera_start.y - 2 * camera_start.z * p * q;
float d = k2 * k2 - 4 * k1 * k3;
if (d >= 0)
{
float t1 = (-k2 + sqrt(d)) / (2 * k1);
float t2 = (-k2 - sqrt(d)) / (2 * k1);
float result = 0;
if ((t1 < t2 && t1 > 0) || (t2 < 0 && t1 >= 0))
result = t1;
if ((t2 < t1 && t2 > 0) || (t1 < 0 && t2 >= 0))
result = t2;
if (t2 == t1 && t2 >= 0)
result = t2;
if (result > 0 && result < depth_buf[i])
{
depth_buf[i] = result;
index_buf[i] = index;
}
}
}
__global__ void intersect_ray_sphere_c(float3 *ray_arr, \
float3 camera_start, \
float3 s_center, \
float s_radius, \
float *depth_buf, int *index_buf, int index)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
float a = dot(ray_arr[i], ray_arr[i]);
float b;
float c;
float t1;
float t2;
float3 dist = sub(camera_start, s_center);
b = 2 * dot(dist, ray_arr[i]);
c = dot(dist, dist) - (s_radius * s_radius);
c = b * b - 4 * a * c;
if (c >= 0)
{
c = sqrt(c);
t1 = (-b + c) / (2 * a);
t2 = (-b - c) / (2 * a);
float result;
result = 0;
if ((t1 < t2 && t1 > 0) || (t2 < 0 && t1 >= 0))
result = t1;
if ((t2 < t1 && t2 > 0) || (t1 < 0 && t2 >= 0))
result = t2;
if (t2 == t1 && t2 >= 0)
result = t2;
if (result > 0 && result < depth_buf[i])
{
depth_buf[i] = result;
index_buf[i] = index;
}
}
}
__global__ void intersect_ray_triangle_c(float3 *ray_arr, float3 camera_start,\
float *depth_buf, float3 *vertex, int *index_buf, int index)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
float3 edge[2];
float3 vec[3];
float det;
float uv[2];
edge[0] = sub(vertex[1],vertex[0]);
edge[1] = sub(vertex[2],vertex[0]);
vec[0] = cross(ray_arr[i], edge[1]);
det = dot(edge[0], vec[0]);
if (det < 1e-8 && det > -1e-8)
return ;
det = 1 / det;
vec[1] = sub(camera_start,vertex[0]);
uv[0] = dot(vec[1], vec[0]) * det;
if (uv[0] < 0 || uv[0] > 1)
return ;
vec[2] = cross(vec[1], edge[0]);
uv[1] = dot(ray_arr[i], vec[2]) * det;
if (uv[1] < 0 || uv[0] + uv[1] > 1)
return ;
float res;
res = dot(edge[1], vec[2]) * det;
if (res > 0 && res < depth_buf[i])
{
depth_buf[i] = res;
index_buf[i] = index;
return ;
}
}
__global__ void intersect_ray_cone_c(float3 *ray_arr, float3 camera_start, float3 position, \
float3 vector, float angle, float *depth_buf, int *index_buf, int index)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
float t1;
float t2;
float b;
float c;
float ang = angle;
float3 vec = vector;
float3 dist = sub(camera_start, position);
float a = dot(ray_arr[i], vec);
a = dot(ray_arr[i], ray_arr[i]) - (1 + ang * ang) * a * a;
b = 2 * (dot(ray_arr[i], dist) - (1 + ang * ang) * \
dot(ray_arr[i], vec) * dot(dist, vec));
c = dot(dist, vec);
c = dot(dist, dist) - (1 + ang * ang) * c * c;
c = b * b - 4 * a * c;
if (c >= 0)
{
c = sqrt(c);
t1 = (-b + c) / (2 * a);
t2 = (-b - c) / (2 * a);
float result;
result = 0;
if ((t1 < t2 && t1 > 0) || (t2 < 0 && t1 >= 0))
result = t1;
if ((t2 < t1 && t2 > 0) || (t1 < 0 && t2 >= 0))
result = t2;
if (t2 == t1 && t2 >= 0)
result = t2;
if (result > 0 && result < depth_buf[i])
{
depth_buf[i] = result;
index_buf[i] = index;
}
}
}
__global__ void intersect_ray_plane_c(float3 *ray_arr, \
float3 camera_start, \
float *depth_buf, \
float3 normal, \
float d, \
int *index_buf, \
int index)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
float k1;
float k2;
if ((dot(ray_arr[i], normal)) == 0)
return ;
k1 = dot(camera_start, normal) + d;
k2 = dot(ray_arr[i], normal);
if (k1 == 0 || (k1 < 0 && k2 < 0) || (k1 > 0 && k2 > 0))
return ;
k1 = -k1 / k2;
if(k1 < depth_buf[i] && k1 > 0)
{
			depth_buf[i] = k1; // k1 was already set to the ray parameter t above
index_buf[i] = index;
}
}
__global__ void intersect_ray_cylinder_c(float3 *ray_arr, float3 camera_start, \
float3 position, float *depth_buf, float3 vector, \
float radius, int *index_buf, int index)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
float t1;
float t2;
float b;
float c;
float3 dist = sub(camera_start, position);
float a = dot(ray_arr[i], vector);
a = dot(ray_arr[i], ray_arr[i]) - a * a;
b = 2 * (dot(ray_arr[i], dist) - dot(ray_arr[i], vector) * \
dot(dist, vector));
c = dot(dist, vector);
c = dot(dist, dist) - c * c - radius * radius;
c = b * b - 4 * a * c;
if (c >= 0)
{
c = sqrt(c);
t1 = (-b + c) / (2 * a);
t2 = (-b - c) / (2 * a);
float result;
result = 0;
if ((t1 < t2 && t1 > 0) || (t2 < 0 && t1 >= 0))
result = t1;
if ((t2 < t1 && t2 > 0) || (t1 < 0 && t2 >= 0))
result = t2;
if (t2 == t1 && t2 >= 0)
result = t2;
if (result > 0 && result < depth_buf[i])
{
depth_buf[i] = result;
index_buf[i] = index;
}
}
} |
56faba5406232d407cb1812b9a91ad5be4cad3fa.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include <chrono>
#define BLOCK_COUNT 256u
#define HALF_BLOCK_COUNT 128u
#define BANKS 16
#define LOG_2_BANKS 4
// macro used for computing
// Bank-Conflict-Free Shared Memory Array Indices
#define AVOID_BANK_CONFLICTS(idx) ((idx) >> BANKS + (idx) >> (LOG_2_BANKS << 1))
#define CSC(call) do { \
hipError_t res = call; \
if (res != hipSuccess) { \
fprintf(stderr, "CUDA Error in %s:%d: %s\n", __FILE__, __LINE__, hipGetErrorString(res)); \
exit(0); \
} \
} while (0)
__global__ void Histogram(unsigned char* data, int size, int* histo)
{
__shared__ int tmp[BLOCK_COUNT];
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int offset = gridDim.x * blockDim.x;
tmp[threadIdx.x] = 0;
__syncthreads();
while (idx < size)
{
atomicAdd(&tmp[data[idx]], 1);
idx += offset;
}
__syncthreads();
int i = threadIdx.x;
while (i < BLOCK_COUNT)
{
atomicAdd(&histo[i], tmp[i]);
i += blockDim.x;
}
}
__global__ void Scan(int* histo, int* prefixSum)
{
__shared__ int tmp[BLOCK_COUNT];
int threadId = threadIdx.x;
int offset = 1;
int aIdx = threadIdx.x;
int bIdx = threadIdx.x + HALF_BLOCK_COUNT;
int bankOffsetA = AVOID_BANK_CONFLICTS(aIdx);
int bankOffsetB = AVOID_BANK_CONFLICTS(bIdx);
tmp[aIdx + bankOffsetA] = histo[aIdx];
tmp[bIdx + bankOffsetB] = histo[bIdx];
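	// Blelloch scan: the first loop is the up-sweep (reduce) phase, the second the down-sweep;
	// the final writes add histo back in, turning the exclusive result into an inclusive prefix sum.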
{
int lvl = BLOCK_COUNT >> 1;
while (lvl > 0)
{
__syncthreads();
if (threadId < lvl)
{
int aIndex = (offset * (threadId * 2 + 1) - 1);
int bIndex = (offset * (threadId * 2 + 2) - 1);
aIndex += AVOID_BANK_CONFLICTS(aIndex);
bIndex += AVOID_BANK_CONFLICTS(bIndex);
tmp[bIndex] += tmp[aIndex];
}
offset <<= 1;
lvl >>= 1;
}
}
if (threadId == 0)
{
tmp[BLOCK_COUNT - 1 + AVOID_BANK_CONFLICTS(BLOCK_COUNT - 1)] = 0;
}
{
int lvl = 1;
while (lvl < BLOCK_COUNT)
{
offset >>= 1;
__syncthreads();
if (threadId < lvl)
{
int aIndex = (offset * (threadId * 2 + 1) - 1);
int bIndex = (offset * (threadId * 2 + 2) - 1);
aIndex += AVOID_BANK_CONFLICTS(aIndex);
bIndex += AVOID_BANK_CONFLICTS(bIndex);
int temp = tmp[aIndex];
tmp[aIndex] = tmp[bIndex];
tmp[bIndex] += temp;
}
lvl <<= 1;
}
}
__syncthreads();
prefixSum[aIdx] = histo[aIdx] + tmp[aIdx + bankOffsetA];
prefixSum[bIdx] = histo[bIdx] + tmp[bIdx + bankOffsetB];
}
__global__ void CountSort(unsigned char* data, int* prefixSum, unsigned char* result, int size)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int offset = gridDim.x * blockDim.x;
int i = idx, j;
while (i < size)
{
j = atomicSub(&prefixSum[data[i]], 1) - 1;
result[j] = data[i];
i += offset;
}
}
int main()
{
int size;
freopen(NULL, "rb", stdin);
fread(&size, sizeof(int), 1, stdin);
unsigned char* data = new unsigned char[size];
fread(data, sizeof(unsigned char), size, stdin);
fclose(stdin);
unsigned char* deviceData;
unsigned char* deviceResult;
int* deviceHisto;
int* devicePrefix;
CSC(hipMalloc((void**)&deviceData, sizeof(unsigned char) * size));
CSC(hipMemcpy(deviceData, data, sizeof(unsigned char) * size, hipMemcpyHostToDevice));
CSC(hipMalloc((void**)&deviceHisto, sizeof(int) * BLOCK_COUNT));
CSC(hipMalloc((void**)&devicePrefix, sizeof(int) * BLOCK_COUNT));
CSC(hipMemset(deviceHisto, 0, sizeof(int) * BLOCK_COUNT));
CSC(hipMalloc((void**)&deviceResult, sizeof(unsigned char) * size));
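	// Counting sort pipeline: per-block histograms -> prefix sums over the 256 bins -> scatter pass.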
hipLaunchKernelGGL(( Histogram), dim3(BLOCK_COUNT), dim3(BLOCK_COUNT), 0, 0, deviceData, size, deviceHisto);
hipDeviceSynchronize(); // wait end
CSC(hipGetLastError());
hipLaunchKernelGGL(( Scan), dim3(1), dim3(HALF_BLOCK_COUNT), 0, 0, deviceHisto, devicePrefix);
hipDeviceSynchronize(); // wait end
CSC(hipGetLastError());
hipLaunchKernelGGL(( CountSort), dim3(1), dim3(BLOCK_COUNT), 0, 0, deviceData, devicePrefix, deviceResult, size);
hipDeviceSynchronize(); // wait end
CSC(hipGetLastError());
CSC(hipMemcpy(data, deviceResult, sizeof(unsigned char) * size, hipMemcpyDeviceToHost));
freopen(NULL, "wb", stdout);
fwrite(data, sizeof(unsigned char), size, stdout);
fclose(stdout);
CSC(hipFree(deviceData));
CSC(hipFree(deviceHisto));
CSC(hipFree(devicePrefix));
CSC(hipFree(deviceResult));
delete[] data;
return 0;
} | 56faba5406232d407cb1812b9a91ad5be4cad3fa.cu | #include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cuda.h>
#include <sys/time.h>
#include <chrono>
#define BLOCK_COUNT 256u
#define HALF_BLOCK_COUNT 128u
#define BANKS 16
#define LOG_2_BANKS 4
// macro used for computing
// Bank-Conflict-Free Shared Memory Array Indices
#define AVOID_BANK_CONFLICTS(idx) ((idx) >> BANKS + (idx) >> (LOG_2_BANKS << 1))
#define CSC(call) do { \
cudaError_t res = call; \
if (res != cudaSuccess) { \
fprintf(stderr, "CUDA Error in %s:%d: %s\n", __FILE__, __LINE__, cudaGetErrorString(res)); \
exit(0); \
} \
} while (0)
__global__ void Histogram(unsigned char* data, int size, int* histo)
{
__shared__ int tmp[BLOCK_COUNT];
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int offset = gridDim.x * blockDim.x;
tmp[threadIdx.x] = 0;
__syncthreads();
while (idx < size)
{
atomicAdd(&tmp[data[idx]], 1);
idx += offset;
}
__syncthreads();
int i = threadIdx.x;
while (i < BLOCK_COUNT)
{
atomicAdd(&histo[i], tmp[i]);
i += blockDim.x;
}
}
__global__ void Scan(int* histo, int* prefixSum)
{
__shared__ int tmp[BLOCK_COUNT];
int threadId = threadIdx.x;
int offset = 1;
int aIdx = threadIdx.x;
int bIdx = threadIdx.x + HALF_BLOCK_COUNT;
int bankOffsetA = AVOID_BANK_CONFLICTS(aIdx);
int bankOffsetB = AVOID_BANK_CONFLICTS(bIdx);
tmp[aIdx + bankOffsetA] = histo[aIdx];
tmp[bIdx + bankOffsetB] = histo[bIdx];
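	// Blelloch scan: the first loop is the up-sweep (reduce) phase, the second the down-sweep;
	// the final writes add histo back in, turning the exclusive result into an inclusive prefix sum.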
{
int lvl = BLOCK_COUNT >> 1;
while (lvl > 0)
{
__syncthreads();
if (threadId < lvl)
{
int aIndex = (offset * (threadId * 2 + 1) - 1);
int bIndex = (offset * (threadId * 2 + 2) - 1);
aIndex += AVOID_BANK_CONFLICTS(aIndex);
bIndex += AVOID_BANK_CONFLICTS(bIndex);
tmp[bIndex] += tmp[aIndex];
}
offset <<= 1;
lvl >>= 1;
}
}
if (threadId == 0)
{
tmp[BLOCK_COUNT - 1 + AVOID_BANK_CONFLICTS(BLOCK_COUNT - 1)] = 0;
}
{
int lvl = 1;
while (lvl < BLOCK_COUNT)
{
offset >>= 1;
__syncthreads();
if (threadId < lvl)
{
int aIndex = (offset * (threadId * 2 + 1) - 1);
int bIndex = (offset * (threadId * 2 + 2) - 1);
aIndex += AVOID_BANK_CONFLICTS(aIndex);
bIndex += AVOID_BANK_CONFLICTS(bIndex);
int temp = tmp[aIndex];
tmp[aIndex] = tmp[bIndex];
tmp[bIndex] += temp;
}
lvl <<= 1;
}
}
__syncthreads();
prefixSum[aIdx] = histo[aIdx] + tmp[aIdx + bankOffsetA];
prefixSum[bIdx] = histo[bIdx] + tmp[bIdx + bankOffsetB];
}
__global__ void CountSort(unsigned char* data, int* prefixSum, unsigned char* result, int size)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int offset = gridDim.x * blockDim.x;
int i = idx, j;
while (i < size)
{
j = atomicSub(&prefixSum[data[i]], 1) - 1;
result[j] = data[i];
i += offset;
}
}
int main()
{
int size;
freopen(NULL, "rb", stdin);
fread(&size, sizeof(int), 1, stdin);
unsigned char* data = new unsigned char[size];
fread(data, sizeof(unsigned char), size, stdin);
fclose(stdin);
unsigned char* deviceData;
unsigned char* deviceResult;
int* deviceHisto;
int* devicePrefix;
CSC(cudaMalloc((void**)&deviceData, sizeof(unsigned char) * size));
CSC(cudaMemcpy(deviceData, data, sizeof(unsigned char) * size, cudaMemcpyHostToDevice));
CSC(cudaMalloc((void**)&deviceHisto, sizeof(int) * BLOCK_COUNT));
CSC(cudaMalloc((void**)&devicePrefix, sizeof(int) * BLOCK_COUNT));
CSC(cudaMemset(deviceHisto, 0, sizeof(int) * BLOCK_COUNT));
CSC(cudaMalloc((void**)&deviceResult, sizeof(unsigned char) * size));
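	// Counting sort pipeline: per-block histograms -> prefix sums over the 256 bins -> scatter pass.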
Histogram<<<BLOCK_COUNT, BLOCK_COUNT>>>(deviceData, size, deviceHisto);
	cudaDeviceSynchronize(); // wait end
CSC(cudaGetLastError());
Scan<<<1, HALF_BLOCK_COUNT>>>(deviceHisto, devicePrefix);
	cudaDeviceSynchronize(); // wait end
CSC(cudaGetLastError());
CountSort<<<1, BLOCK_COUNT>>>(deviceData, devicePrefix, deviceResult, size);
	cudaDeviceSynchronize(); // wait end
CSC(cudaGetLastError());
CSC(cudaMemcpy(data, deviceResult, sizeof(unsigned char) * size, cudaMemcpyDeviceToHost));
freopen(NULL, "wb", stdout);
fwrite(data, sizeof(unsigned char), size, stdout);
fclose(stdout);
CSC(cudaFree(deviceData));
CSC(cudaFree(deviceHisto));
CSC(cudaFree(devicePrefix));
CSC(cudaFree(deviceResult));
delete[] data;
return 0;
} |
b1921626c33984e492fbc7fdb48e7ecfcae78431.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
// @author Yurii Shyrma ([email protected])
//
#include <exceptions/cuda_exception.h>
#include <rocblas.h>
#include "../MmulHelper.h"
#include <specials_cuda.h>
#include <helpers/PointersManager.h>
namespace nd4j {
//////////////////////////////////////////////////////////////////////////////
// MxK x KxN = MxN
// C array must be in f order
template <typename T1, typename T2, typename T3>
static __global__ void usualCudaGemm(const bool transA, const bool transB, const int M, const int N, const int K, const double alpha, const void* vA, const int lda, const void* vB, const int ldb, const double beta, void* vC, const int ldc) {
T1* A = reinterpret_cast<T1*>(const_cast<void*>(vA));
T2* B = reinterpret_cast<T2*>(const_cast<void*>(vB));
T3* C = reinterpret_cast<T3*>(vC);
__shared__ T3 alphaZ, betaZ;
__shared__ Nd4jLong strideArow, strideAcol, strideBrow, strideBcol;
const int row = blockIdx.y * blockDim.y + threadIdx.y;
const int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row == 0 && col == 0) {
alphaZ = alpha;
betaZ = beta;
if(transA) { strideArow = lda; strideAcol = 1; } else { strideArow = 1; strideAcol = lda; }
if(transB) { strideBrow = ldb; strideBcol = 1; } else { strideBrow = 1; strideBcol = ldb; }
}
__syncthreads();
T3 val = 0;
if (row < M && col < N)
for (int i = 0; i < K; i++)
val = val + A[row * strideArow + i * strideAcol] * B[i * strideBrow + col * strideBcol];
C[row + col * ldc] = alphaZ * val + betaZ * C[row + col * ldc];
}
////////////////////////////////////////////////////////////////////////
template <typename T1, typename T2, typename T3>
__host__ static void usualGemm(const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, hipStream_t *stream, const bool transA, const bool transB, const int M, const int N, const int K, const double alpha, const void* vA, const int lda, const void* vB, const int ldb, const double beta, void* vC, const int ldc) {
hipLaunchKernelGGL(( usualCudaGemm<T1,T2,T3>), dim3(blocksPerGrid), dim3(threadsPerBlock), 1024, *stream, transA, transB, M, N, K, alpha, vA, lda, vB, ldb, beta, vC, ldc);
}
//////////////////////////////////////////////////////////////////////////////
// MxN x N = M
template <typename T1, typename T2, typename T3>
static __global__ void usualCudaGemv(const bool transA, const int M, const int N, const double alpha, const void* vA, const int lda, const void* vX, const int incx, const double beta, void* vY, const int incy) {
T1* A = reinterpret_cast<T1*>(const_cast<void*>(vA));
T2* X = reinterpret_cast<T2*>(const_cast<void*>(vX));
T3* Y = reinterpret_cast<T3*>(vY);
__shared__ T3 alphaZ, betaZ;
__shared__ Nd4jLong strideArow, strideAcol;
const int row = blockIdx.x * blockDim.x + threadIdx.x;
if(row == 0) {
alphaZ = alpha;
betaZ = beta;
if(transA) { strideArow = lda; strideAcol = 1; } else { strideArow = 1; strideAcol = lda; }
}
__syncthreads();
T3 val = 0;
if (row < M)
for (int i = 0; i < N; i++) {
val = val + A[row * strideArow + i * strideAcol] * X[i * incx];
}
Y[row * incy] = alphaZ * val + betaZ * Y[row * incy];
}
////////////////////////////////////////////////////////////////////////
template <typename T1, typename T2, typename T3>
__host__ static void usualGemv(const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, hipStream_t *stream, const bool transA, const int M, const int N, const double alpha, const void* vA, const int lda, const void* vX, const int incx, const double beta, void* vY, const int incy) {
hipLaunchKernelGGL(( usualCudaGemv<T1,T2,T3>), dim3(blocksPerGrid), dim3(threadsPerBlock), 1024, *stream, transA, M, N, alpha, vA, lda, vX, incx, beta, vY, incy);
}
//////////////////////////////////////////////////////////////////////////////
template <typename T1, typename T2, typename T3>
static __global__ void usualCudaDot(const Nd4jLong length, const double alpha, const void* vX, const Nd4jLong incx, const void* vY, const Nd4jLong incy, const double beta, void* vZ) {
T1* X = reinterpret_cast<T1*>(const_cast<void*>(vX));
T2* Y = reinterpret_cast<T2*>(const_cast<void*>(vY));
T3* Z = reinterpret_cast<T3*>(vZ);
extern __shared__ char shmem[];
auto pairwiseMul = reinterpret_cast<T3*>(shmem);
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < length)
pairwiseMul[tid] = X[tid * incx] * Y[tid * incy];
__syncthreads();
if(tid == 0) {
T3 sum = 0;
for(Nd4jLong i = 0; i < length; ++i)
sum = sum + pairwiseMul[i];
*Z = (T3)alpha * sum + (T3)beta * *Z;
}
}
////////////////////////////////////////////////////////////////////////
template <typename T1, typename T2, typename T3>
__host__ static void usualDot(const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, hipStream_t *stream, const Nd4jLong length, const double alpha, const void* vX, const Nd4jLong incx, const void* vY, const Nd4jLong incy, const double beta, void* vZ) {
hipLaunchKernelGGL(( usualCudaDot<T1,T2,T3>), dim3(blocksPerGrid), dim3(threadsPerBlock), length*sizeof(T3) + 128, *stream, length, alpha, vX, incx, vY, incy, beta, vZ);
}
//////////////////////////////////////////////////////////////////////////////
// MxK x KxN = MxN
NDArray* MmulHelper::mmulMxM(const NDArray* A, const NDArray* B, NDArray* C, double alpha, double beta, const char outOrder) {
if(A->rankOf() != 2)
throw std::runtime_error("MmulHelper::mmulMxM cuda: rank of A array is not equal 2 !");
if(B->rankOf() != 2)
throw std::runtime_error("MmulHelper::mmulMxM cuda: rank of B array is not equal 2 !");
auto M = A->sizeAt(0);
auto K = A->sizeAt(1);
auto N = B->sizeAt(1);
if(C != nullptr && C->rankOf() != 2)
throw std::runtime_error("MmulHelper::mmulMxM cuda: rank of C array is not equal 2 !");
if(B->sizeAt(0) != K)
throw std::runtime_error("MmulHelper::mmulMxM cuda: B array has wrong number of rows !");
if(C != nullptr && C->sizeAt(0) != M)
throw std::runtime_error("MmulHelper::mmulMxM cuda: C array has wrong number of rows !");
if(C != nullptr && C->sizeAt(1) != N)
throw std::runtime_error("MmulHelper::mmulMxM cuda: C array has wrong number of columns !");
if(C == nullptr)
C = new NDArray(outOrder, {M,N}, DataTypeUtils::pickPairwiseResultType(A->dataType(), B->dataType()), A->getContext());
NDArray *pA(const_cast<NDArray*>(A)), *pB(const_cast<NDArray*>(B)), *pC(const_cast<NDArray*>(C));
std::vector<NDArray*> toDelete;
if(A->ews() != 1) {
pA = pA->dup('f');
toDelete.push_back(pA);
}
if(B->ews() != 1) {
pB = pB->dup('f');
toDelete.push_back(pB);
}
if(C->ews() != 1) {
pC = pC->dup('f');
toDelete.push_back(pC);
}
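    // cuBLAS produces column-major ('f') output; when C is row-major we compute C^T = B^T * A^T
    // instead, by permuting the operands, so the result lands in C's own layout.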
if(pC->ordering() != 'f') {
auto temp = pA;
pA = new NDArray(pB ->permute({1,0}));
pB = new NDArray(temp->permute({1,0}));
pC = new NDArray(pC ->permute({1,0}));
toDelete.push_back(pA);
toDelete.push_back(pB);
toDelete.push_back(pC);
M = pA->sizeAt(0);
K = pA->sizeAt(1);
N = pB->sizeAt(1);
}
const auto aOrder = pA->ordering();
const auto bOrder = pB->ordering();
const bool transA = aOrder != 'f';
const bool transB = bOrder != 'f';
const hipblasOperation_t transAblas = transA ? HIPBLAS_OP_T : HIPBLAS_OP_N;
const hipblasOperation_t transBblas = transB ? HIPBLAS_OP_T : HIPBLAS_OP_N;
const int lda = aOrder == 'f' ? M : K;
const int ldb = bOrder == 'f' ? K : N;
const int ldc = M; // cOrder == 'f' ? M : N;
const auto aType = pA->dataType();
const auto bType = pB->dataType();
const auto cType = pC->dataType();
auto handle = reinterpret_cast<hipblasHandle_t *>(A->getContext()->getCublasHandle());
auto stream = A->getContext()->getCudaStream();
auto status = hipblasSetStream(*handle, *stream);
if (status != HIPBLAS_STATUS_SUCCESS) throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", status);
const bool AB(aType == bType), AC(aType == cType), ABC(AB && AC);
NDArray::prepareSpecialUse({pC}, {pA, pB});
// choose appropriate cuda gemm api depending on data types
if(ABC && aType == DataType::DOUBLE) {
status = hipblasDgemm(*handle, transAblas, transBblas, M, N, K, &alpha, (double*)pA->getSpecialBuffer(), lda, (double*)pB->getSpecialBuffer(), ldb, &beta, (double*)pC->getSpecialBuffer(), ldc);
}
else if(ABC && aType == DataType::FLOAT32) {
float alphaF(alpha), betaF(beta);
status = hipblasSgemm(*handle, transAblas, transBblas, M, N, K, &alphaF, (float*)pA->getSpecialBuffer(), lda, (float*)pB->getSpecialBuffer(), ldb, &betaF, (float*)pC->getSpecialBuffer(), ldc);
}
else if(ABC && aType == DataType::HALF) {
float16 alphaH(alpha), betaH(beta);
status = hipblasHgemm(*handle, transAblas, transBblas, M, N, K, &alphaH.data, (__half*)pA->getSpecialBuffer(), lda, (__half*)pB->getSpecialBuffer(), ldb, &betaH.data, (__half*)pC->getSpecialBuffer(), ldc);
}
else if(AB && aType == DataType::INT8 && cType == DataType::FLOAT32) {
float alphaF(alpha), betaF(beta);
status = cublasSgemmEx(*handle, transAblas, transBblas, M, N, K, &alphaF, pA->getSpecialBuffer(), HIP_R_8I, lda, pB->getSpecialBuffer(), HIP_R_8I, ldb, &betaF, pC->getSpecialBuffer(), HIP_R_32F, ldc);
}
else if(AB && aType == DataType::HALF && cType == DataType::FLOAT32) {
float alphaF(alpha), betaF(beta);
status = cublasSgemmEx(*handle, transAblas, transBblas, M, N, K, &alphaF, pA->getSpecialBuffer(), HIP_R_16F, lda, pB->getSpecialBuffer(), HIP_R_16F, ldb, &betaF, pC->getSpecialBuffer(), HIP_R_32F, ldc);
}
else {
dim3 threadsPerBlock(N, M);
dim3 blocksPerGrid(1, 1);
if (M*N > 512){
threadsPerBlock.x = threadsPerBlock.y = 512;
blocksPerGrid.x = math::nd4j_ceil<double, int>(static_cast<double>(N) / threadsPerBlock.x); // cols
blocksPerGrid.y = math::nd4j_ceil<double, int>(static_cast<double>(M) / threadsPerBlock.y); // rows
}
//BUILD_TRIPLE_SELECTOR(aType, bType, cType, usualGemm, (blocksPerGrid, threadsPerBlock, stream, transA, transB, M, N, K, alpha, pA->getSpecialBuffer(), lda, pB->getSpecialBuffer(), ldb, beta, pC->getSpecialBuffer(), ldc), LIBND4J_TYPES, FLOAT_TYPES, FLOAT_TYPES);
BUILD_SINGLE_SELECTOR_THRICE(aType, usualGemm, (blocksPerGrid, threadsPerBlock, stream, transA, transB, M, N, K, alpha, pA->getSpecialBuffer(), lda, pB->getSpecialBuffer(), ldb, beta, pC->getSpecialBuffer(), ldc), LIBND4J_TYPES)
}
if (status != HIPBLAS_STATUS_SUCCESS) throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", status);
auto cudaResult = hipStreamSynchronize(*stream);
if (cudaResult != 0) throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", cudaResult);
NDArray::registerSpecialUse({pC}, {pA, pB});
if(C->ews() != 1)
C->assign(pC);
for(int i = toDelete.size() - 1; i >= 0; --i)
delete toDelete[i];
return C;
}
////////////////////////////////////////////////////////////////////////////
// MxN x N = M
NDArray* MmulHelper::mmulMxV(const NDArray* A, const NDArray* X, nd4j::NDArray* Y, const double alpha, const double beta, const char outOrder) {
int xLenDim, yLenDim(0);
if(A->rankOf() != 2)
throw std::runtime_error("MmulHelper::mmulMxV cuda: rank of A array is not equal 2 !");
if(!shape::isCommonVector(X->getShapeInfo(), xLenDim))
throw std::runtime_error("MmulHelper::mmulMxV cuda: X array must be vector !");
const auto M = A->sizeAt(0);
const auto N = A->sizeAt(1);
if(Y != nullptr && !shape::isCommonVector(Y->getShapeInfo(), yLenDim))
throw std::runtime_error("MmulHelper::mmulMxV cuda: Y array must be vector !");
if(X->lengthOf() != N)
throw std::runtime_error("MmulHelper::mmulMxV cuda: X vector has wrong length !");
if(Y != nullptr && Y->lengthOf() != M)
throw std::runtime_error("MmulHelper::mmulMxV cuda: Y array has wrong length !");
if(Y == nullptr)
Y = new NDArray(outOrder, {M}, DataTypeUtils::pickPairwiseResultType(A->dataType(), X->dataType()), A->getContext());
NDArray *pA(const_cast<NDArray*>(A));
if(A->ews() != 1)
pA = pA->dup('f');
const bool transA = pA->ordering() == 'c';
const hipblasOperation_t transAblas = transA ? HIPBLAS_OP_T : HIPBLAS_OP_N;
int lda, lta;
if(transA) { lda = N; lta = M; }
else { lda = M; lta = N; }
const int incx = X->stridesOf()[xLenDim];
const int incy = Y->stridesOf()[yLenDim];
const auto aType = pA->dataType();
const auto xType = X->dataType();
const auto yType = Y->dataType();
auto handle = reinterpret_cast<hipblasHandle_t *>(A->getContext()->getCublasHandle());
auto stream = A->getContext()->getCudaStream();
auto status = hipblasSetStream(*handle, *stream);
if (status != HIPBLAS_STATUS_SUCCESS) throw cuda_exception::build("MmulHelper::mmulMxV cuda failed !", status);
const bool AX(aType == xType), AY(aType == yType), AXY(AX && AY);
NDArray::prepareSpecialUse({Y}, {pA, X});
// choose appropriate cuda gemm api depending on data types
if(AXY && aType == DataType::DOUBLE) {
status = hipblasDgemv(*handle, transAblas, lda, lta, &alpha, (double*)pA->getSpecialBuffer(), lda, (double*)X->getSpecialBuffer(), incx, &beta, (double*)Y->getSpecialBuffer(), incy);
}
else if(AXY && aType == DataType::FLOAT32) {
float alphaF(alpha), betaF(beta);
status = hipblasSgemv(*handle, transAblas, lda, lta, &alphaF, (float*)pA->getSpecialBuffer(), lda, (float*)X->getSpecialBuffer(), incx, &betaF, (float*)Y->getSpecialBuffer(), incy);
}
else {
dim3 threadsPerBlock(M);
dim3 blocksPerGrid(1);
if (M > 512){
threadsPerBlock.x = 512;
blocksPerGrid.x = math::nd4j_ceil<double, int>(static_cast<double>(M) / threadsPerBlock.x); // rows
}
//BUILD_TRIPLE_SELECTOR(aType, xType, yType, usualGemv, (blocksPerGrid, threadsPerBlock, stream, transA, M, N, alpha, pA->getSpecialBuffer(), lda, X->getSpecialBuffer(), incx, beta, Y->getSpecialBuffer(), incy), LIBND4J_TYPES, FLOAT_TYPES, FLOAT_TYPES);
BUILD_SINGLE_SELECTOR_THRICE(xType, usualGemv, (blocksPerGrid, threadsPerBlock, stream, transA, M, N, alpha, pA->getSpecialBuffer(), lda, X->getSpecialBuffer(), incx, beta, Y->getSpecialBuffer(), incy), LIBND4J_TYPES)
}
if (status != HIPBLAS_STATUS_SUCCESS) throw cuda_exception::build("MmulHelper::mmulMxV cuda failed !", status);
auto cudaResult = hipStreamSynchronize(*stream);
if (cudaResult != 0) throw cuda_exception::build("MmulHelper::mmulMxV cuda failed !", cudaResult);
NDArray::registerSpecialUse({Y}, {pA, X});
if(pA != A)
delete pA;
return Y;
}
////////////////////////////////////////////////////////////////////////////
// (X * Y) = Z[0]
NDArray* MmulHelper::dot(const NDArray* X, const NDArray* Y, nd4j::NDArray* Z, const double alpha, const double beta) {
int xLenDim(0), yLenDim(0);
if(!shape::isCommonVector(X->getShapeInfo(), xLenDim))
throw std::runtime_error("MmulHelper::dot cuda: X array must be vector !");
if(!shape::isCommonVector(Y->getShapeInfo(), yLenDim))
throw std::runtime_error("MmulHelper::dot cuda: Y array must be vector !");
if(Z != nullptr && !Z->isScalar())
throw std::runtime_error("MmulHelper::dot cuda: Z array must be scalar !");
const auto length = X->lengthOf();
if(Y->lengthOf() != length)
throw std::runtime_error("MmulHelper::dot cuda: lengths of input vectors are different !");
if(Z == nullptr)
Z = new NDArray(DataTypeUtils::pickPairwiseResultType(X->dataType(), Y->dataType()), X->getContext());
const Nd4jLong incx = X->stridesOf()[xLenDim];
const Nd4jLong incy = Y->stridesOf()[yLenDim];
const auto xType = X->dataType();
const auto yType = Y->dataType();
const auto zType = Z->dataType();
if(!X->isActualOnDeviceSide()) X->syncToDevice();
if(!Y->isActualOnDeviceSide()) Y->syncToDevice();
if(!Z->isActualOnDeviceSide()) Z->syncToDevice();
hipStream_t* stream = X->getContext()->getCudaStream();
dim3 threadsPerBlock(512);
dim3 blocksPerGrid(1);
if (length > 512)
threadsPerBlock.x = math::nd4j_ceil<double, int>(static_cast<double>(length) / 512);
NDArray::prepareSpecialUse({Z}, {X, Y});
//BUILD_TRIPLE_SELECTOR(xType, yType, zType, usualDot, (blocksPerGrid, threadsPerBlock, stream, length, alpha, X->getSpecialBuffer(), incx, Y->getSpecialBuffer(), incy, beta, Z->getSpecialBuffer()), LIBND4J_TYPES, FLOAT_TYPES, FLOAT_TYPES);
BUILD_SINGLE_SELECTOR_THRICE(xType, usualDot, (blocksPerGrid, threadsPerBlock, stream, length, alpha, X->getSpecialBuffer(), incx, Y->getSpecialBuffer(), incy, beta, Z->getSpecialBuffer()), LIBND4J_TYPES)
auto cudaResult = hipStreamSynchronize(*stream);
if (cudaResult != 0) throw cuda_exception::build("MmulHelper::dot cuda failed !", cudaResult);
NDArray::registerSpecialUse({Z}, {X, Y});
return Z;
}
//BUILD_TRIPLE_TEMPLATE(template void usualGemm, (const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, hipStream_t *stream, const bool transA, const bool transB, const int M, const int N, const int K, const double alpha, const void* vA, const int lda, const void* vB, const int ldb, const double beta, void* vC, const int ldc), LIBND4J_TYPES, FLOAT_TYPES, FLOAT_TYPES);
//BUILD_TRIPLE_TEMPLATE(template void usualGemv, (const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, hipStream_t *stream, const bool transA, const int M, const int N, const double alpha, const void* vA, const int lda, const void* vB, const int incx, const double beta, void* vC, const int incy), LIBND4J_TYPES, FLOAT_TYPES, FLOAT_TYPES);
//BUILD_TRIPLE_TEMPLATE(template void usualDot, (const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, hipStream_t *stream, const Nd4jLong length, const double alpha, const void* vX, const Nd4jLong incx, const void* vY, const Nd4jLong incy, const double beta, void* vZ), LIBND4J_TYPES, FLOAT_TYPES, FLOAT_TYPES);
} | b1921626c33984e492fbc7fdb48e7ecfcae78431.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
// @author Yurii Shyrma ([email protected])
//
#include <exceptions/cuda_exception.h>
#include <cublas_v2.h>
#include "../MmulHelper.h"
#include <specials_cuda.h>
#include <helpers/PointersManager.h>
namespace nd4j {
//////////////////////////////////////////////////////////////////////////////
// MxK x KxN = MxN
// C array must be in f order
template <typename T1, typename T2, typename T3>
static __global__ void usualCudaGemm(const bool transA, const bool transB, const int M, const int N, const int K, const double alpha, const void* vA, const int lda, const void* vB, const int ldb, const double beta, void* vC, const int ldc) {
T1* A = reinterpret_cast<T1*>(const_cast<void*>(vA));
T2* B = reinterpret_cast<T2*>(const_cast<void*>(vB));
T3* C = reinterpret_cast<T3*>(vC);
__shared__ T3 alphaZ, betaZ;
__shared__ Nd4jLong strideArow, strideAcol, strideBrow, strideBcol;
const int row = blockIdx.y * blockDim.y + threadIdx.y;
const int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row == 0 && col == 0) {
alphaZ = alpha;
betaZ = beta;
if(transA) { strideArow = lda; strideAcol = 1; } else { strideArow = 1; strideAcol = lda; }
if(transB) { strideBrow = ldb; strideBcol = 1; } else { strideBrow = 1; strideBcol = ldb; }
}
__syncthreads();
    T3 val = 0;
    if (row < M && col < N) {
        for (int i = 0; i < K; i++)
            val = val + A[row * strideArow + i * strideAcol] * B[i * strideBrow + col * strideBcol];
        // guard the write as well: threads outside the MxN tile must not touch C
        C[row + col * ldc] = alphaZ * val + betaZ * C[row + col * ldc];
    }
}
////////////////////////////////////////////////////////////////////////
template <typename T1, typename T2, typename T3>
__host__ static void usualGemm(const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, cudaStream_t *stream, const bool transA, const bool transB, const int M, const int N, const int K, const double alpha, const void* vA, const int lda, const void* vB, const int ldb, const double beta, void* vC, const int ldc) {
usualCudaGemm<T1,T2,T3><<<blocksPerGrid, threadsPerBlock, 1024, *stream>>>(transA, transB, M, N, K, alpha, vA, lda, vB, ldb, beta, vC, ldc);
}
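////////////////////////////////////////////////////////////////////////
// Editorial sketch (not part of the original source): the fallback kernels
// above assume column-major ('f' order) storage, so element (row, col) of a
// matrix with leading dimension ld lives at row + col * ld, and a transposed
// operand is handled by swapping the row/col strides. The hypothetical helper
// below merely restates that mapping for clarity; nothing above calls it.
__host__ __device__ inline Nd4jLong fOrderOffset(const Nd4jLong row, const Nd4jLong col, const Nd4jLong ld, const bool transposed = false) {
    // when transposed, the roles of row and col swap, mirroring strideArow/strideAcol in usualCudaGemm
    return transposed ? col + row * ld : row + col * ld;
}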
//////////////////////////////////////////////////////////////////////////////
// MxN x N = M
template <typename T1, typename T2, typename T3>
static __global__ void usualCudaGemv(const bool transA, const int M, const int N, const double alpha, const void* vA, const int lda, const void* vX, const int incx, const double beta, void* vY, const int incy) {
T1* A = reinterpret_cast<T1*>(const_cast<void*>(vA));
T2* X = reinterpret_cast<T2*>(const_cast<void*>(vX));
T3* Y = reinterpret_cast<T3*>(vY);
__shared__ T3 alphaZ, betaZ;
__shared__ Nd4jLong strideArow, strideAcol;
const int row = blockIdx.x * blockDim.x + threadIdx.x;
if(row == 0) {
alphaZ = alpha;
betaZ = beta;
if(transA) { strideArow = lda; strideAcol = 1; } else { strideArow = 1; strideAcol = lda; }
}
__syncthreads();
    T3 val = 0;
    if (row < M) {
        for (int i = 0; i < N; i++) {
            val = val + A[row * strideArow + i * strideAcol] * X[i * incx];
        }
        // guard the write as well: rows beyond M must not touch Y
        Y[row * incy] = alphaZ * val + betaZ * Y[row * incy];
    }
}
////////////////////////////////////////////////////////////////////////
template <typename T1, typename T2, typename T3>
__host__ static void usualGemv(const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, cudaStream_t *stream, const bool transA, const int M, const int N, const double alpha, const void* vA, const int lda, const void* vX, const int incx, const double beta, void* vY, const int incy) {
usualCudaGemv<T1,T2,T3><<<blocksPerGrid, threadsPerBlock, 1024, *stream>>>(transA, M, N, alpha, vA, lda, vX, incx, beta, vY, incy);
}
//////////////////////////////////////////////////////////////////////////////
template <typename T1, typename T2, typename T3>
static __global__ void usualCudaDot(const Nd4jLong length, const double alpha, const void* vX, const Nd4jLong incx, const void* vY, const Nd4jLong incy, const double beta, void* vZ) {
T1* X = reinterpret_cast<T1*>(const_cast<void*>(vX));
T2* Y = reinterpret_cast<T2*>(const_cast<void*>(vY));
T3* Z = reinterpret_cast<T3*>(vZ);
extern __shared__ char shmem[];
auto pairwiseMul = reinterpret_cast<T3*>(shmem);
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < length)
pairwiseMul[tid] = X[tid * incx] * Y[tid * incy];
__syncthreads();
if(tid == 0) {
T3 sum = 0;
for(Nd4jLong i = 0; i < length; ++i)
sum = sum + pairwiseMul[i];
*Z = (T3)alpha * sum + (T3)beta * *Z;
}
}
////////////////////////////////////////////////////////////////////////
template <typename T1, typename T2, typename T3>
__host__ static void usualDot(const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, cudaStream_t *stream, const Nd4jLong length, const double alpha, const void* vX, const Nd4jLong incx, const void* vY, const Nd4jLong incy, const double beta, void* vZ) {
usualCudaDot<T1,T2,T3><<<blocksPerGrid, threadsPerBlock, length*sizeof(T3) + 128, *stream>>>(length, alpha, vX, incx, vY, incy, beta, vZ);
}
//////////////////////////////////////////////////////////////////////////////
// MxK x KxN = MxN
NDArray* MmulHelper::mmulMxM(const NDArray* A, const NDArray* B, NDArray* C, double alpha, double beta, const char outOrder) {
if(A->rankOf() != 2)
throw std::runtime_error("MmulHelper::mmulMxM cuda: rank of A array is not equal 2 !");
if(B->rankOf() != 2)
throw std::runtime_error("MmulHelper::mmulMxM cuda: rank of B array is not equal 2 !");
auto M = A->sizeAt(0);
auto K = A->sizeAt(1);
auto N = B->sizeAt(1);
if(C != nullptr && C->rankOf() != 2)
throw std::runtime_error("MmulHelper::mmulMxM cuda: rank of C array is not equal 2 !");
if(B->sizeAt(0) != K)
throw std::runtime_error("MmulHelper::mmulMxM cuda: B array has wrong number of rows !");
if(C != nullptr && C->sizeAt(0) != M)
throw std::runtime_error("MmulHelper::mmulMxM cuda: C array has wrong number of rows !");
if(C != nullptr && C->sizeAt(1) != N)
throw std::runtime_error("MmulHelper::mmulMxM cuda: C array has wrong number of columns !");
if(C == nullptr)
C = new NDArray(outOrder, {M,N}, DataTypeUtils::pickPairwiseResultType(A->dataType(), B->dataType()), A->getContext());
NDArray *pA(const_cast<NDArray*>(A)), *pB(const_cast<NDArray*>(B)), *pC(const_cast<NDArray*>(C));
std::vector<NDArray*> toDelete;
if(A->ews() != 1) {
pA = pA->dup('f');
toDelete.push_back(pA);
}
if(B->ews() != 1) {
pB = pB->dup('f');
toDelete.push_back(pB);
}
if(C->ews() != 1) {
pC = pC->dup('f');
toDelete.push_back(pC);
}
if(pC->ordering() != 'f') {
auto temp = pA;
pA = new NDArray(pB ->permute({1,0}));
pB = new NDArray(temp->permute({1,0}));
pC = new NDArray(pC ->permute({1,0}));
toDelete.push_back(pA);
toDelete.push_back(pB);
toDelete.push_back(pC);
M = pA->sizeAt(0);
K = pA->sizeAt(1);
N = pB->sizeAt(1);
}
const auto aOrder = pA->ordering();
const auto bOrder = pB->ordering();
const bool transA = aOrder != 'f';
const bool transB = bOrder != 'f';
const cublasOperation_t transAblas = transA ? CUBLAS_OP_T : CUBLAS_OP_N;
const cublasOperation_t transBblas = transB ? CUBLAS_OP_T : CUBLAS_OP_N;
const int lda = aOrder == 'f' ? M : K;
const int ldb = bOrder == 'f' ? K : N;
const int ldc = M; // cOrder == 'f' ? M : N;
const auto aType = pA->dataType();
const auto bType = pB->dataType();
const auto cType = pC->dataType();
auto handle = reinterpret_cast<cublasHandle_t *>(A->getContext()->getCublasHandle());
auto stream = A->getContext()->getCudaStream();
auto status = cublasSetStream_v2(*handle, *stream);
if (status != CUBLAS_STATUS_SUCCESS) throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", status);
const bool AB(aType == bType), AC(aType == cType), ABC(AB && AC);
NDArray::prepareSpecialUse({pC}, {pA, pB});
// choose appropriate cuda gemm api depending on data types
if(ABC && aType == DataType::DOUBLE) {
status = cublasDgemm(*handle, transAblas, transBblas, M, N, K, &alpha, (double*)pA->getSpecialBuffer(), lda, (double*)pB->getSpecialBuffer(), ldb, &beta, (double*)pC->getSpecialBuffer(), ldc);
}
else if(ABC && aType == DataType::FLOAT32) {
float alphaF(alpha), betaF(beta);
status = cublasSgemm(*handle, transAblas, transBblas, M, N, K, &alphaF, (float*)pA->getSpecialBuffer(), lda, (float*)pB->getSpecialBuffer(), ldb, &betaF, (float*)pC->getSpecialBuffer(), ldc);
}
else if(ABC && aType == DataType::HALF) {
float16 alphaH(alpha), betaH(beta);
status = cublasHgemm(*handle, transAblas, transBblas, M, N, K, &alphaH.data, (__half*)pA->getSpecialBuffer(), lda, (__half*)pB->getSpecialBuffer(), ldb, &betaH.data, (__half*)pC->getSpecialBuffer(), ldc);
}
else if(AB && aType == DataType::INT8 && cType == DataType::FLOAT32) {
float alphaF(alpha), betaF(beta);
status = cublasSgemmEx(*handle, transAblas, transBblas, M, N, K, &alphaF, pA->getSpecialBuffer(), CUDA_R_8I, lda, pB->getSpecialBuffer(), CUDA_R_8I, ldb, &betaF, pC->getSpecialBuffer(), CUDA_R_32F, ldc);
}
else if(AB && aType == DataType::HALF && cType == DataType::FLOAT32) {
float alphaF(alpha), betaF(beta);
status = cublasSgemmEx(*handle, transAblas, transBblas, M, N, K, &alphaF, pA->getSpecialBuffer(), CUDA_R_16F, lda, pB->getSpecialBuffer(), CUDA_R_16F, ldb, &betaF, pC->getSpecialBuffer(), CUDA_R_32F, ldc);
}
else {
dim3 threadsPerBlock(N, M);
dim3 blocksPerGrid(1, 1);
        if (M*N > 512){
            // 512x512 threads would exceed the per-block limit; use 16x16 tiles instead
            threadsPerBlock.x = threadsPerBlock.y = 16;
            blocksPerGrid.x = math::nd4j_ceil<double, int>(static_cast<double>(N) / threadsPerBlock.x);   // cols
            blocksPerGrid.y = math::nd4j_ceil<double, int>(static_cast<double>(M) / threadsPerBlock.y);   // rows
}
//BUILD_TRIPLE_SELECTOR(aType, bType, cType, usualGemm, (blocksPerGrid, threadsPerBlock, stream, transA, transB, M, N, K, alpha, pA->getSpecialBuffer(), lda, pB->getSpecialBuffer(), ldb, beta, pC->getSpecialBuffer(), ldc), LIBND4J_TYPES, FLOAT_TYPES, FLOAT_TYPES);
BUILD_SINGLE_SELECTOR_THRICE(aType, usualGemm, (blocksPerGrid, threadsPerBlock, stream, transA, transB, M, N, K, alpha, pA->getSpecialBuffer(), lda, pB->getSpecialBuffer(), ldb, beta, pC->getSpecialBuffer(), ldc), LIBND4J_TYPES)
}
if (status != CUBLAS_STATUS_SUCCESS) throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", status);
auto cudaResult = cudaStreamSynchronize(*stream);
if (cudaResult != 0) throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", cudaResult);
NDArray::registerSpecialUse({pC}, {pA, pB});
if(C->ews() != 1)
C->assign(pC);
for(int i = toDelete.size() - 1; i >= 0; --i)
delete toDelete[i];
return C;
}
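////////////////////////////////////////////////////////////////////////////
// Editorial note (sketch, not original source): the pC->ordering() != 'f'
// branch above relies on the identity (A*B)^T = B^T * A^T. Computing
// B^T * A^T (an NxK by KxM product) in column-major order yields an NxM
// 'f'-ordered buffer whose memory layout coincides with the requested MxN
// row-major C, which is why the operands are swapped and permuted there.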
////////////////////////////////////////////////////////////////////////////
// MxN x N = M
NDArray* MmulHelper::mmulMxV(const NDArray* A, const NDArray* X, nd4j::NDArray* Y, const double alpha, const double beta, const char outOrder) {
int xLenDim, yLenDim(0);
if(A->rankOf() != 2)
throw std::runtime_error("MmulHelper::mmulMxV cuda: rank of A array is not equal 2 !");
if(!shape::isCommonVector(X->getShapeInfo(), xLenDim))
throw std::runtime_error("MmulHelper::mmulMxV cuda: X array must be vector !");
const auto M = A->sizeAt(0);
const auto N = A->sizeAt(1);
if(Y != nullptr && !shape::isCommonVector(Y->getShapeInfo(), yLenDim))
throw std::runtime_error("MmulHelper::mmulMxV cuda: Y array must be vector !");
if(X->lengthOf() != N)
throw std::runtime_error("MmulHelper::mmulMxV cuda: X vector has wrong length !");
if(Y != nullptr && Y->lengthOf() != M)
throw std::runtime_error("MmulHelper::mmulMxV cuda: Y array has wrong length !");
if(Y == nullptr)
Y = new NDArray(outOrder, {M}, DataTypeUtils::pickPairwiseResultType(A->dataType(), X->dataType()), A->getContext());
NDArray *pA(const_cast<NDArray*>(A));
if(A->ews() != 1)
pA = pA->dup('f');
const bool transA = pA->ordering() == 'c';
const cublasOperation_t transAblas = transA ? CUBLAS_OP_T : CUBLAS_OP_N;
int lda, lta;
if(transA) { lda = N; lta = M; }
else { lda = M; lta = N; }
const int incx = X->stridesOf()[xLenDim];
const int incy = Y->stridesOf()[yLenDim];
const auto aType = pA->dataType();
const auto xType = X->dataType();
const auto yType = Y->dataType();
auto handle = reinterpret_cast<cublasHandle_t *>(A->getContext()->getCublasHandle());
auto stream = A->getContext()->getCudaStream();
auto status = cublasSetStream_v2(*handle, *stream);
if (status != CUBLAS_STATUS_SUCCESS) throw cuda_exception::build("MmulHelper::mmulMxV cuda failed !", status);
const bool AX(aType == xType), AY(aType == yType), AXY(AX && AY);
NDArray::prepareSpecialUse({Y}, {pA, X});
// choose appropriate cuda gemm api depending on data types
if(AXY && aType == DataType::DOUBLE) {
status = cublasDgemv(*handle, transAblas, lda, lta, &alpha, (double*)pA->getSpecialBuffer(), lda, (double*)X->getSpecialBuffer(), incx, &beta, (double*)Y->getSpecialBuffer(), incy);
}
else if(AXY && aType == DataType::FLOAT32) {
float alphaF(alpha), betaF(beta);
status = cublasSgemv(*handle, transAblas, lda, lta, &alphaF, (float*)pA->getSpecialBuffer(), lda, (float*)X->getSpecialBuffer(), incx, &betaF, (float*)Y->getSpecialBuffer(), incy);
}
else {
dim3 threadsPerBlock(M);
dim3 blocksPerGrid(1);
if (M > 512){
threadsPerBlock.x = 512;
blocksPerGrid.x = math::nd4j_ceil<double, int>(static_cast<double>(M) / threadsPerBlock.x); // rows
}
//BUILD_TRIPLE_SELECTOR(aType, xType, yType, usualGemv, (blocksPerGrid, threadsPerBlock, stream, transA, M, N, alpha, pA->getSpecialBuffer(), lda, X->getSpecialBuffer(), incx, beta, Y->getSpecialBuffer(), incy), LIBND4J_TYPES, FLOAT_TYPES, FLOAT_TYPES);
BUILD_SINGLE_SELECTOR_THRICE(xType, usualGemv, (blocksPerGrid, threadsPerBlock, stream, transA, M, N, alpha, pA->getSpecialBuffer(), lda, X->getSpecialBuffer(), incx, beta, Y->getSpecialBuffer(), incy), LIBND4J_TYPES)
}
if (status != CUBLAS_STATUS_SUCCESS) throw cuda_exception::build("MmulHelper::mmulMxV cuda failed !", status);
auto cudaResult = cudaStreamSynchronize(*stream);
if (cudaResult != 0) throw cuda_exception::build("MmulHelper::mmulMxV cuda failed !", cudaResult);
NDArray::registerSpecialUse({Y}, {pA, X});
if(pA != A)
delete pA;
return Y;
}
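////////////////////////////////////////////////////////////////////////////
// Editorial note (sketch, not original source): for a 'c'-ordered A the code
// above passes the dimensions as (lda, lta) = (N, M) together with
// CUBLAS_OP_T, so cuBLAS interprets the row-major MxN buffer as its
// column-major NxM transpose and then transposes it back, reproducing
// y = alpha*A*x + beta*y without an explicit copy of A.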
////////////////////////////////////////////////////////////////////////////
// (X * Y) = Z[0]
NDArray* MmulHelper::dot(const NDArray* X, const NDArray* Y, nd4j::NDArray* Z, const double alpha, const double beta) {
int xLenDim(0), yLenDim(0);
if(!shape::isCommonVector(X->getShapeInfo(), xLenDim))
throw std::runtime_error("MmulHelper::dot cuda: X array must be vector !");
if(!shape::isCommonVector(Y->getShapeInfo(), yLenDim))
throw std::runtime_error("MmulHelper::dot cuda: Y array must be vector !");
if(Z != nullptr && !Z->isScalar())
throw std::runtime_error("MmulHelper::dot cuda: Z array must be scalar !");
const auto length = X->lengthOf();
if(Y->lengthOf() != length)
throw std::runtime_error("MmulHelper::dot cuda: lengths of input vectors are different !");
if(Z == nullptr)
Z = new NDArray(DataTypeUtils::pickPairwiseResultType(X->dataType(), Y->dataType()), X->getContext());
const Nd4jLong incx = X->stridesOf()[xLenDim];
const Nd4jLong incy = Y->stridesOf()[yLenDim];
const auto xType = X->dataType();
const auto yType = Y->dataType();
const auto zType = Z->dataType();
if(!X->isActualOnDeviceSide()) X->syncToDevice();
if(!Y->isActualOnDeviceSide()) Y->syncToDevice();
if(!Z->isActualOnDeviceSide()) Z->syncToDevice();
cudaStream_t* stream = X->getContext()->getCudaStream();
dim3 threadsPerBlock(512);
dim3 blocksPerGrid(1);
if (length > 512)
threadsPerBlock.x = math::nd4j_ceil<double, int>(static_cast<double>(length) / 512);
NDArray::prepareSpecialUse({Z}, {X, Y});
//BUILD_TRIPLE_SELECTOR(xType, yType, zType, usualDot, (blocksPerGrid, threadsPerBlock, stream, length, alpha, X->getSpecialBuffer(), incx, Y->getSpecialBuffer(), incy, beta, Z->getSpecialBuffer()), LIBND4J_TYPES, FLOAT_TYPES, FLOAT_TYPES);
BUILD_SINGLE_SELECTOR_THRICE(xType, usualDot, (blocksPerGrid, threadsPerBlock, stream, length, alpha, X->getSpecialBuffer(), incx, Y->getSpecialBuffer(), incy, beta, Z->getSpecialBuffer()), LIBND4J_TYPES)
auto cudaResult = cudaStreamSynchronize(*stream);
if (cudaResult != 0) throw cuda_exception::build("MmulHelper::dot cuda failed !", cudaResult);
NDArray::registerSpecialUse({Z}, {X, Y});
return Z;
}
//BUILD_TRIPLE_TEMPLATE(template void usualGemm, (const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, cudaStream_t *stream, const bool transA, const bool transB, const int M, const int N, const int K, const double alpha, const void* vA, const int lda, const void* vB, const int ldb, const double beta, void* vC, const int ldc), LIBND4J_TYPES, FLOAT_TYPES, FLOAT_TYPES);
//BUILD_TRIPLE_TEMPLATE(template void usualGemv, (const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, cudaStream_t *stream, const bool transA, const int M, const int N, const double alpha, const void* vA, const int lda, const void* vB, const int incx, const double beta, void* vC, const int incy), LIBND4J_TYPES, FLOAT_TYPES, FLOAT_TYPES);
//BUILD_TRIPLE_TEMPLATE(template void usualDot, (const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, cudaStream_t *stream, const Nd4jLong length, const double alpha, const void* vX, const Nd4jLong incx, const void* vY, const Nd4jLong incy, const double beta, void* vZ), LIBND4J_TYPES, FLOAT_TYPES, FLOAT_TYPES);
} |
90d1fa215ab99441faae0fcc929d842cc69fbebd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kernel_print( long const* p, int n)
{
printf("long: %d ",n);
for(int i=0; i<n; i++)
printf("%ld ",*(p+i));
} | 90d1fa215ab99441faae0fcc929d842cc69fbebd.cu | #include "includes.h"
__global__ void kernel_print( long const* p, int n)
{
printf("long: %d ",n);
for(int i=0; i<n; i++)
printf("%ld ",*(p+i));
} |
721459610393f55c6be6e2055b2cb7d0bc5e54c9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void update_e( int Ny, int Nz, float *Ex, float *Ey, float *Ez, float *Hx, float *Hy, float *Hz, float *CEx, float *CEy, float *CEz ) {
int tk = threadIdx.x;
int idx = blockIdx.x*blockDim.x + tk;
int Nyz = Ny*Nz;
//int fidx = idx + idx/(Nz-1) + Nyz + Nz + 1;
int fidx = idx + idx/(Nz-1) + idx/( (Nz-1)*(Ny-1) )*(Nz-1) + Nyz + Nz + 1;
extern __shared__ float hs[];
float* hx = (float*) hs;
float* hy = (float*) &hx[blockDim.x+1];
float* hz = (float*) &hy[blockDim.x+1];
hx[tk] = Hx[fidx];
hy[tk] = Hy[fidx];
hz[tk] = Hz[fidx];
if ( tk==blockDim.x-1 ) {
hx[tk+1] = Hx[fidx+1];
hy[tk+1] = Hy[fidx+1];
}
__syncthreads();
Ex[fidx] += CEx[fidx]*( Hz[fidx+Nz] - hz[tk] - hy[tk+1] + hy[tk] );
Ey[fidx] += CEy[fidx]*( hx[tk+1] - hx[tk] - Hz[fidx+Nyz] + hz[tk] );
Ez[fidx] += CEz[fidx]*( Hy[fidx+Nyz] - hy[tk] - Hx[fidx+Nz] + hx[tk] );
}
__global__ void update_h( int Ny, int Nz, float *Ex, float *Ey, float *Ez, float *Hx, float *Hy, float *Hz ) {
int tk = threadIdx.x;
int idx = blockIdx.x*blockDim.x + tk;
int Nyz = Ny*Nz;
//int fidx = idx + idx/(Nz-1) + Nyz + Nz + 1;
int fidx = idx + idx/(Nz-1) + idx/( (Nz-1)*(Ny-1) )*(Nz-1) + Nyz + Nz + 1;
extern __shared__ float es[];
float* ex = (float*) es;
float* ey = (float*) &ex[blockDim.x+1];
float* ez = (float*) &ey[blockDim.x+1];
ex[tk+1] = Ex[fidx];
ey[tk+1] = Ey[fidx];
ez[tk] = Ez[fidx];
if ( tk==0 ) {
ex[0] = Ex[fidx-1];
ey[0] = Ey[fidx-1];
}
__syncthreads();
Hx[fidx] -= 0.5*( ez[tk] - Ez[fidx-Nz] - ey[tk+1] + ey[tk] );
Hy[fidx] -= 0.5*( ex[tk+1] - ex[tk] - ez[tk] + Ez[fidx-Nyz] );
Hz[fidx] -= 0.5*( ey[tk+1] - Ey[fidx-Nyz] - ex[tk+1] + Ex[fidx-Nz] );
}
| 721459610393f55c6be6e2055b2cb7d0bc5e54c9.cu | __global__ void update_e( int Ny, int Nz, float *Ex, float *Ey, float *Ez, float *Hx, float *Hy, float *Hz, float *CEx, float *CEy, float *CEz ) {
int tk = threadIdx.x;
int idx = blockIdx.x*blockDim.x + tk;
int Nyz = Ny*Nz;
//int fidx = idx + idx/(Nz-1) + Nyz + Nz + 1;
int fidx = idx + idx/(Nz-1) + idx/( (Nz-1)*(Ny-1) )*(Nz-1) + Nyz + Nz + 1;
extern __shared__ float hs[];
float* hx = (float*) hs;
float* hy = (float*) &hx[blockDim.x+1];
float* hz = (float*) &hy[blockDim.x+1];
hx[tk] = Hx[fidx];
hy[tk] = Hy[fidx];
hz[tk] = Hz[fidx];
if ( tk==blockDim.x-1 ) {
hx[tk+1] = Hx[fidx+1];
hy[tk+1] = Hy[fidx+1];
}
__syncthreads();
Ex[fidx] += CEx[fidx]*( Hz[fidx+Nz] - hz[tk] - hy[tk+1] + hy[tk] );
Ey[fidx] += CEy[fidx]*( hx[tk+1] - hx[tk] - Hz[fidx+Nyz] + hz[tk] );
Ez[fidx] += CEz[fidx]*( Hy[fidx+Nyz] - hy[tk] - Hx[fidx+Nz] + hx[tk] );
}
__global__ void update_h( int Ny, int Nz, float *Ex, float *Ey, float *Ez, float *Hx, float *Hy, float *Hz ) {
int tk = threadIdx.x;
int idx = blockIdx.x*blockDim.x + tk;
int Nyz = Ny*Nz;
//int fidx = idx + idx/(Nz-1) + Nyz + Nz + 1;
int fidx = idx + idx/(Nz-1) + idx/( (Nz-1)*(Ny-1) )*(Nz-1) + Nyz + Nz + 1;
extern __shared__ float es[];
float* ex = (float*) es;
float* ey = (float*) &ex[blockDim.x+1];
float* ez = (float*) &ey[blockDim.x+1];
ex[tk+1] = Ex[fidx];
ey[tk+1] = Ey[fidx];
ez[tk] = Ez[fidx];
if ( tk==0 ) {
ex[0] = Ex[fidx-1];
ey[0] = Ey[fidx-1];
}
__syncthreads();
Hx[fidx] -= 0.5*( ez[tk] - Ez[fidx-Nz] - ey[tk+1] + ey[tk] );
Hy[fidx] -= 0.5*( ex[tk+1] - ex[tk] - ez[tk] + Ez[fidx-Nyz] );
Hz[fidx] -= 0.5*( ey[tk+1] - Ey[fidx-Nyz] - ex[tk+1] + Ex[fidx-Nz] );
}
|
f65b937d78cd60c8e45933ed5f7706900b01796c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// cudafeature/feature-online-batched-spectral-cuda-kernels.cu
//
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
// Justin Luitjens, Levi Barnes
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#if HAVE_CUDA == 1
#include <roctracer/roctx.h>
#include <hipcub/hipcub.hpp>
#endif
#include "cudafeat/feature-online-batched-spectral-cuda-kernels.h"
#include "cudafeat/lane-desc.h"
#include "cudamatrix/cu-rand.h"
namespace kaldi {
// Mimics the functionality of mel_banks_compute_kernel
// (found in feature-spectral-cuda.cu). The 3rd
// dimension (z) of the block grid gives the hardware
// "lane". lanes tells us which channel is in this lane,
// what current frame and sample are processed in this
// batch, etc.
__global__ void batched_mel_banks_compute_kernel(
const LaneDesc *lanes, int32_t n_lanes, int32_t max_chunk_frames,
float energy_floor, int32 *offsets, int32 *sizes, float **vecs,
const float *feats, int32_t ldf, float *mels, int32_t ldm, bool use_log) {
// Specialize WarpReduce for type float
typedef hipcub::WarpReduce<float> WarpReduce;
// Allocate WarpReduce shared memory for 8 warps
__shared__ typename WarpReduce::TempStorage temp_storage[8];
// warp will work together to compute sum
int tid = threadIdx.x;
int wid = threadIdx.y;
// blocks in the x dimension take different bins
int bin = blockIdx.x;
// frame is a combination of blocks in the y dimension and threads in the y
// dimension
int frame = blockIdx.y * blockDim.y + threadIdx.y;
int lane = blockIdx.z;
LaneDesc desc = lanes[lane];
int num_frames = desc.num_chunk_frames;
// TODO get offsets, sizes, and vecs from laneInfo?
int offset = offsets[bin];
int size = sizes[bin];
const float *v = vecs[bin];
const float *w = feats + frame * ldf + lane * max_chunk_frames * ldf + offset;
  // perform local sum
float sum = 0;
if (frame < num_frames) { // exclude frames beyond the end
for (int idx = tid; idx < size; idx += 32) {
sum += v[idx] * w[idx];
}
}
// Sum in cub
sum = WarpReduce(temp_storage[wid]).Sum(sum);
if (tid == 0 && frame < num_frames) {
if (use_log) {
// avoid log of zero
if (sum < energy_floor) sum = energy_floor;
float val = logf(sum);
mels[lane * max_chunk_frames * ldm + frame * ldm + bin] = val;
} else {
mels[lane * max_chunk_frames * ldm + frame * ldm + bin] = sum;
}
}
}
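// Editorial note (added comment, hedged): offsets/sizes/vecs describe the mel
// filterbank in a ragged, CSR-like form -- for output bin 'bin', vecs[bin]
// points to sizes[bin] filter weights that are dotted with the power-spectrum
// slice starting at column offsets[bin] of the current frame. One 32-thread
// warp per (bin, frame, lane) triple performs the dot product and reduces it
// with cub.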
// Mimics the functionality of apply_lifter_and_floor_energy
// (found in feature-spectral-cuda.cu) for a chunk of data
// from several audio channels. The 2nd dimension
// (y) of the block grid gives the hardware "lane".
// The lanes array tells us which channel is in this lane,
// what current frame and sample are processed in this
// batch, etc.
__global__ void batched_apply_lifter_and_floor_energy_kernel(
const LaneDesc *lanes, int32_t n_lanes, int32_t max_chunk_frames,
int num_cols, float cepstral_lifter, bool use_energy, float energy_floor,
float *log_energy, int32_t ldl, float *lifter_coeffs, float *features,
int32_t ldf) {
int thread_id = threadIdx.x;
int frame = blockIdx.x;
int lane = blockIdx.y;
LaneDesc desc = lanes[lane];
if (frame >= desc.num_chunk_frames) return;
float *feats = features + frame * ldf + lane * max_chunk_frames * ldf;
// apply lifter coefficients
if (cepstral_lifter != 0.0f) {
for (int c = thread_id; c < num_cols; c += CU1DBLOCK) {
float lift = lifter_coeffs[c];
float f = feats[c];
feats[c] = f * lift;
}
}
// Thread 0 for each frame will apply energy
if (use_energy && thread_id == 0) {
float energy = log_energy[frame + ldl * lane];
float log_energy_floor = log(energy_floor);
if (energy_floor > 0.0f && energy < log_energy_floor) {
energy = log_energy_floor;
}
feats[0] = energy;
}
}
// Mimics the functionality of process_window_kernel
// (found in feature-spectral-cuda.cu) for a chunk of data
// from several audio channels. The 2nd dimension
// (y) of the block grid gives the hardware "lane".
// The lanes array tells us which channel is in this lane,
// what current frame and sample are processed in this
// batch, etc.
__global__ void batched_process_window_kernel(
const LaneDesc *lanes, int32_t n_lanes, int32_t max_chunk_frames,
int frame_length, float dither, float energy_floor, bool remove_dc_offset,
float preemph_coeff, bool need_raw_log_energy, float *log_energy_pre_window,
int32_t lde, const float *windowing, float *tmp_windows, int32_t ldt,
float *windows, int32_t ldw) {
  // Specialize BlockReduce for type float
typedef hipcub::BlockReduce<float, CU1DBLOCK> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
int thread_id = threadIdx.x;
int row = blockIdx.x;
int lane = blockIdx.y;
LaneDesc desc = lanes[lane];
if (row >= desc.num_chunk_frames) return;
float *tmp_window = tmp_windows + row * ldt + lane * max_chunk_frames * ldt;
float *window = windows + row * ldw + lane * max_chunk_frames * ldw;
__shared__ float ssum;
float sum = 0;
float wdot = 0;
for (int idx = thread_id; idx < frame_length; idx += CU1DBLOCK) {
// tmp_window contains optional dither. Apply that on read.
float wval = window[idx];
if (dither != 0.0f) {
wval += tmp_window[idx] * dither;
}
// compute local sum for removing dc offset
sum += wval;
// compute dot product for log energy
wdot += wval * wval;
float windowing_mul = 1;
if (remove_dc_offset == false && preemph_coeff == 0.0f) {
// we are done here so set windowing multiplication on write.
windowing_mul = windowing[idx];
}
// write dithered output
window[idx] = wval * windowing_mul;
}
__syncthreads();
if (remove_dc_offset) {
// we will recompute this below
wdot = 0.0f;
// use cub to reduce
sum = BlockReduce(temp_storage).Sum(sum);
// broadcast sum to entire block
if (thread_id == 0) ssum = sum;
__syncthreads();
sum = -ssum / frame_length;
for (int idx = thread_id; idx < frame_length; idx += CU1DBLOCK) {
float windowing_mul = 1;
float *out = window;
if (preemph_coeff == 0.0f) {
// we are done here so apply windowing
windowing_mul = windowing[idx];
} else {
// write to temp window as we will copy back into window
// when doing pre-emphasis
out = tmp_window;
}
// updated window value
float wval = window[idx] + sum;
// compute new dot product with dc offset removed
wdot += wval * wval;
// write output
out[idx] = wval * windowing_mul;
}
}
__syncthreads();
// if pointer is not NULL we will set energy to either
// the computed energy or 0 depending on need_raw_log_energy
if (log_energy_pre_window != NULL) {
float energy = 0.0f;
if (need_raw_log_energy) {
      // must sync before reusing temp_storage
if (remove_dc_offset) __syncthreads();
// use cub to reduce
wdot = BlockReduce(temp_storage).Sum(wdot);
energy = max(wdot, energy_floor);
}
if (thread_id == 0) {
log_energy_pre_window[row + lane * lde] = log(energy);
}
}
// TODO this could be more efficient using shared memory instead of
// tmp_window.
if (preemph_coeff != 0.0f) {
// wait for tmp_window to be computed
__threadfence();
__syncthreads();
// starting thread idx at 0 to keep writes aligned.
    // unaligned reads are less painful than unaligned writes
for (int idx = thread_id; idx < frame_length; idx += CU1DBLOCK) {
float wval = tmp_window[idx];
float prev_window = wval;
if (idx > 0) {
prev_window = tmp_window[idx - 1];
}
// use __fmul_rn to match CPU
// window[idx] = (wval - preemph_coeff*prev_window) * windowing[idx];
window[idx] =
(wval - __fmul_rn(preemph_coeff, prev_window)) * windowing[idx];
}
}
}
__host__ __device__ inline int32 FirstSampleOfFrame(int32 frame,
int32 frame_shift,
int32 window_size,
bool snip_edges) {
if (snip_edges) {
return frame * frame_shift;
} else {
int32 midpoint_of_frame = frame_shift * frame + frame_shift / 2,
beginning_of_frame = midpoint_of_frame - window_size / 2;
return beginning_of_frame;
}
}
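// Worked example (editorial, illustrative values): with frame_shift = 160,
// window_size = 400 and snip_edges = false, frame 0 is centered on sample
// 160/2 = 80, so it begins at 80 - 400/2 = -120; such negative indices are
// later served from the per-channel stash or by reflection at the chunk edge.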
// Mimics the functionality of extract_window_kernel
// (found in feature-spectral-cuda.cu) for a chunk of data
// from several audio channels. The 2nd dimension
// (y) of the block grid gives the hardware "lane".
// The lanes array tells us which channel is in this lane,
// what current frame and sample are processed in this
// batch, etc.
// Extra samples not processed in this chunk are moved to
// "stash" where they'll be pre-pended to the next chunk
// from this channel
__global__ void batched_extract_window_kernel(
const LaneDesc *lanes, int32_t num_lanes, int32 frame_shift,
int32 frame_length, int32 frame_length_padded, bool snip_edges,
const BaseFloat *__restrict__ wave, int32_t ldw,
BaseFloat *__restrict__ windows, int32_t window_size, int32_t wlda,
BaseFloat *stash, int32_t ssize, int32_t lds) {
// local frame number
int32_t fidx = blockIdx.x;
int32_t tidx = threadIdx.x;
int32_t lane = blockIdx.y;
const LaneDesc desc = lanes[lane];
ChannelId channel = desc.channel;
// This is the current sample that is pointed to by wave
int32_t current_sample = desc.current_sample;
// current frame we are computing in global space
int32_t current_frame = desc.current_frame;
// global frame number computed by this block
int32_t global_frame = current_frame + fidx;
int32_t num_chunk_samples = desc.num_chunk_samples;
if (fidx >= desc.num_chunk_frames) return;
// offset input/output by channels or lanes
stash = stash + channel * lds;
wave = wave + lane * ldw;
BaseFloat *window = windows + fidx * wlda + gridDim.x * lane * wlda;
// This is the first sample needed to compute this frame
int32_t start_sample =
FirstSampleOfFrame(global_frame, frame_shift, window_size, snip_edges);
// Sample offset is how much we have to offset our index
// into the input wave.
int32_t wave_start = start_sample - current_sample;
// wave_start and wave_end are start and end indexes into 'wave', for the
// piece of wave that we're trying to extract.
int32_t wave_end = wave_start + frame_length;
// wave_start will be negative on successive chunks as we need
// to grab context from stash.
if ((current_frame > 0 || wave_start >= 0) && wave_end <= num_chunk_samples) {
// the normal case-- no edge effects to consider.
for (int i = tidx; i < frame_length; i += blockDim.x) {
int32_t widx = wave_start + i;
BaseFloat val;
if (widx >= 0) {
val = wave[widx];
} else {
// widx is negative. Add it to the stash size
// to get the correct index into the stash
int32_t sidx = ssize + widx;
val = stash[sidx];
}
window[i] = val;
}
} else {
// Deal with any end effects by reflection, if needed. This code will only
// be reached for about two frames per utterance, so we don't concern
// ourselves excessively with efficiency.
for (int s = tidx; s < frame_length; s += blockDim.x) {
int32 s_in_wave = wave_start + s;
while (s_in_wave < 0 || s_in_wave >= num_chunk_samples) {
// reflect around the beginning or end of the wave.
// e.g. -1 -> 0, -2 -> 1.
// dim -> dim - 1, dim + 1 -> dim - 2.
// the code supports repeated reflections, although this
// would only be needed in pathological cases.
if (s_in_wave < 0)
s_in_wave = -s_in_wave - 1;
else
s_in_wave = 2 * num_chunk_samples - 1 - s_in_wave;
}
window[s] = wave[s_in_wave];
}
}
if (frame_length_padded > frame_length) {
for (int i = frame_length + tidx; i < frame_length_padded;
i += blockDim.x) {
window[i] = 0.0f;
}
}
}
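// Worked example (editorial, illustrative values): on a later chunk the first
// frames need samples from before the current chunk, so wave_start is
// negative. With ssize = 300 and wave_start = -120, thread i = 0 computes
// widx = -120 and reads stash[300 - 120] = stash[180], i.e. the tail of the
// previous chunk that batched_update_stash_kernel saved for this channel.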
// For each frame
// compute logf(dot(signal_frame, signal_frame))
// This is the batched version. The y-dimension of the grid
// give the lane number
__global__ void batched_dot_log_kernel(int32_t max_chunk_frames,
int32_t frame_length,
float *signal_frame, int32_t lds,
float *signal_log_energy, int32_t lde) {
  // Specialize BlockReduce for type float
  typedef hipcub::BlockReduce<float, CU1DBLOCK> BlockReduce;
  // Allocate BlockReduce shared memory
__shared__ typename BlockReduce::TempStorage temp_storage;
int32_t frame = blockIdx.x;
int32_t tid = threadIdx.x;
int32_t lane = blockIdx.y;
float *in = signal_frame + frame * lds + max_chunk_frames * lane * lds;
float sum = 0;
  // perform local dot product
for (int32_t i = tid; i < frame_length; i += blockDim.x) {
float val = in[i];
sum += val * val;
}
// reduce using cub
sum = BlockReduce(temp_storage).Sum(sum);
if (threadIdx.x == 0) {
signal_log_energy[frame + lane * lde] = logf(sum);
}
}
__global__ void batched_update_stash_kernel(const LaneDesc *lanes,
int32_t num_lanes,
const BaseFloat *wave, int32_t ldw,
BaseFloat *stash, int32_t num_stash,
int32_t lds) {
int32_t lane = blockIdx.x;
LaneDesc desc = lanes[lane];
int32_t channel = desc.channel;
int32_t num_chunk_samples = desc.num_chunk_samples;
// offset memory by lane or channel
wave = wave + lane * ldw;
stash = stash + channel * lds;
int32_t sample_offset = num_chunk_samples - num_stash;
for (int i = threadIdx.x; i < num_stash; i += blockDim.x) {
int32_t idx = sample_offset + i;
float val;
if (idx < 0) {
// data must come from old stash
val = stash[idx + num_stash];
} else {
// data comes from new wave
val = wave[idx];
}
__syncthreads();
stash[i] = val;
}
}
// Each threadblock computes a different row of the matrix.
// Threads in the same block compute the row collaboratively.
// This kernel must be called out of place (A_in!=A_out).
__global__ void power_spectrum_kernel(int row_length, const float *A_in, int32_t ldi,
float *A_out, int32_t ldo,
bool use_power) {
int thread_id = threadIdx.x;
int block_id = blockIdx.x;
const float *Ar = A_in + block_id * ldi;
float *Aw = A_out + block_id * ldo;
int half_length = row_length / 2;
for (int idx = thread_id; idx < half_length; idx += CU1DBLOCK) {
// ignore special case
if (idx == 0) continue;
float2 val = reinterpret_cast<const float2 *>(Ar)[idx];
float ret = val.x * val.x + val.y * val.y;
if (use_power) {
Aw[idx] = ret;
} else {
Aw[idx] = sqrtf(ret);
}
}
// handle special case
if (threadIdx.x == 0) {
float real = Ar[0];
    // cuFFT puts this term (the Nyquist component) at the end of the row,
    // which differs from Kaldi's own internal FFT implementation
float im = Ar[row_length];
if (use_power) {
Aw[0] = real * real;
Aw[half_length] = im * im;
} else {
Aw[0] = fabs(real);
Aw[half_length] = fabs(im);
}
}
}
void cuda_power_spectrum(int32_t max_chunk_frames, int32_t num_lanes,
int row_length, const float *A_in, int32_t ldi,
float *A_out, int32_t ldo, bool use_power) {
hipLaunchKernelGGL(( power_spectrum_kernel), dim3(max_chunk_frames * num_lanes), dim3(CU1DBLOCK), 0, 0,
row_length, A_in, ldi, A_out, ldo, use_power);
}
void cuda_mel_banks_compute(const LaneDesc *lanes, int32_t num_lanes,
int32_t max_chunk_frames, int32_t num_bins,
float energy_floor, int32 *offsets, int32 *sizes,
float **vecs, const float *feats, int32_t ldf,
float *mels, int32_t ldm, bool use_log) {
dim3 Bl(32, 8);
dim3 Gr(num_bins, (max_chunk_frames + Bl.y - 1) / Bl.y, num_lanes);
hipLaunchKernelGGL(( batched_mel_banks_compute_kernel), dim3(Gr), dim3(Bl), 0, 0,
lanes, num_lanes, max_chunk_frames, energy_floor, offsets, sizes, vecs,
feats, ldf, mels, ldm, use_log);
}
void cuda_apply_lifter_and_floor_energy(const LaneDesc *lanes,
int32_t num_lanes,
int32_t max_chunk_frames, int num_cols,
float cepstral_lifter, bool use_energy,
float energy_floor, float *log_energy,
int32_t ldl, float *lifter_coeffs,
float *features, int32_t ldf) {
dim3 Gr(max_chunk_frames, num_lanes);
hipLaunchKernelGGL(( batched_apply_lifter_and_floor_energy_kernel), dim3(Gr), dim3(CU1DBLOCK), 0, 0,
lanes, num_lanes, max_chunk_frames, num_cols, cepstral_lifter, use_energy,
energy_floor, log_energy, ldl, lifter_coeffs, features, ldf);
}
void cuda_process_window(const LaneDesc *lanes, int32_t num_lanes,
int32_t max_chunk_frames, int frame_length,
float dither, float energy_floor,
bool remove_dc_offset, float preemph_coeff,
bool need_raw_log_energy, float *log_energy_pre_window,
int32_t lde, const float *windowing,
float *tmp_windows, int32_t ldt, float *windows,
int32_t ldw) {
dim3 Gr(max_chunk_frames, num_lanes);
int Bl = CU1DBLOCK;
hipLaunchKernelGGL(( batched_process_window_kernel), dim3(Gr), dim3(Bl), 0, 0,
lanes, num_lanes, max_chunk_frames, frame_length, dither, energy_floor,
remove_dc_offset, preemph_coeff, need_raw_log_energy,
log_energy_pre_window, lde, windowing, tmp_windows, ldt, windows, ldw);
}
void cuda_extract_window(const LaneDesc *lanes, int32_t num_lanes,
int32_t max_chunk_frames, int32 frame_shift,
int32 frame_length, int32 frame_length_padded,
bool snip_edges, const float *wave, int32_t ldw,
float *windows, int32_t window_size, int32_t wlda,
BaseFloat *stash, int32_t ssize, int32_t lds) {
dim3 Gr(max_chunk_frames, num_lanes);
int Bl = CU1DBLOCK;
hipLaunchKernelGGL(( batched_extract_window_kernel), dim3(Gr), dim3(Bl), 0, 0,
lanes, num_lanes, frame_shift, frame_length, frame_length_padded,
snip_edges, wave, ldw, windows, window_size, wlda, stash, ssize, lds);
}
void cuda_dot_log(int32_t max_chunk_frames, int32_t num_lanes,
int32_t frame_length, float *signal_frame, int32_t lds,
float *signal_log_energy, int32_t lde) {
dim3 Gr(max_chunk_frames, num_lanes);
hipLaunchKernelGGL(( batched_dot_log_kernel), dim3(Gr), dim3(CU1DBLOCK), 0, 0, max_chunk_frames, frame_length,
signal_frame, lds,
signal_log_energy, lde);
}
void cuda_update_stash(const LaneDesc *lanes, int32_t num_lanes,
const BaseFloat *wave, int32_t ldw, BaseFloat *stash,
int32_t num_stash, int32_t lds) {
int Gr = num_lanes;
int Bl = 1024;
hipLaunchKernelGGL(( batched_update_stash_kernel), dim3(Gr), dim3(Bl), 0, 0, lanes, num_lanes, wave, ldw, stash,
num_stash, lds);
}
} // namespace kaldi
| f65b937d78cd60c8e45933ed5f7706900b01796c.cu | // cudafeature/feature-online-batched-spectral-cuda-kernels.cu
//
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
// Justin Luitjens, Levi Barnes
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#if HAVE_CUDA == 1
#include <nvToolsExt.h>
#include <cub/cub.cuh>
#endif
#include "cudafeat/feature-online-batched-spectral-cuda-kernels.h"
#include "cudafeat/lane-desc.h"
#include "cudamatrix/cu-rand.h"
namespace kaldi {
// Mimics the functionality of mel_banks_compute_kernel
// (found in feature-spectral-cuda.cu). The 3rd
// dimension (z) of the block grid gives the hardware
// "lane". lanes tells us which channel is in this lane,
// what current frame and sample are processed in this
// batch, etc.
__global__ void batched_mel_banks_compute_kernel(
const LaneDesc *lanes, int32_t n_lanes, int32_t max_chunk_frames,
float energy_floor, int32 *offsets, int32 *sizes, float **vecs,
const float *feats, int32_t ldf, float *mels, int32_t ldm, bool use_log) {
// Specialize WarpReduce for type float
typedef cub::WarpReduce<float> WarpReduce;
// Allocate WarpReduce shared memory for 8 warps
__shared__ typename WarpReduce::TempStorage temp_storage[8];
// warp will work together to compute sum
int tid = threadIdx.x;
int wid = threadIdx.y;
// blocks in the x dimension take different bins
int bin = blockIdx.x;
// frame is a combination of blocks in the y dimension and threads in the y
// dimension
int frame = blockIdx.y * blockDim.y + threadIdx.y;
int lane = blockIdx.z;
LaneDesc desc = lanes[lane];
int num_frames = desc.num_chunk_frames;
// TODO get offsets, sizes, and vecs from laneInfo?
int offset = offsets[bin];
int size = sizes[bin];
const float *v = vecs[bin];
const float *w = feats + frame * ldf + lane * max_chunk_frames * ldf + offset;
  // perform local sum
float sum = 0;
if (frame < num_frames) { // exclude frames beyond the end
for (int idx = tid; idx < size; idx += 32) {
sum += v[idx] * w[idx];
}
}
// Sum in cub
sum = WarpReduce(temp_storage[wid]).Sum(sum);
if (tid == 0 && frame < num_frames) {
if (use_log) {
// avoid log of zero
if (sum < energy_floor) sum = energy_floor;
float val = logf(sum);
mels[lane * max_chunk_frames * ldm + frame * ldm + bin] = val;
} else {
mels[lane * max_chunk_frames * ldm + frame * ldm + bin] = sum;
}
}
}
// Mimics the functionality of apply_lifter_and_floor_energy
// (found in feature-spectral-cuda.cu) for a chunk of data
// from several audio channels. The 2nd dimension
// (y) of the block grid gives the hardware "lane".
// The lanes array tells us which channel is in this lane,
// what current frame and sample are processed in this
// batch, etc.
__global__ void batched_apply_lifter_and_floor_energy_kernel(
const LaneDesc *lanes, int32_t n_lanes, int32_t max_chunk_frames,
int num_cols, float cepstral_lifter, bool use_energy, float energy_floor,
float *log_energy, int32_t ldl, float *lifter_coeffs, float *features,
int32_t ldf) {
int thread_id = threadIdx.x;
int frame = blockIdx.x;
int lane = blockIdx.y;
LaneDesc desc = lanes[lane];
if (frame >= desc.num_chunk_frames) return;
float *feats = features + frame * ldf + lane * max_chunk_frames * ldf;
// apply lifter coefficients
if (cepstral_lifter != 0.0f) {
for (int c = thread_id; c < num_cols; c += CU1DBLOCK) {
float lift = lifter_coeffs[c];
float f = feats[c];
feats[c] = f * lift;
}
}
// Thread 0 for each frame will apply energy
if (use_energy && thread_id == 0) {
float energy = log_energy[frame + ldl * lane];
float log_energy_floor = log(energy_floor);
if (energy_floor > 0.0f && energy < log_energy_floor) {
energy = log_energy_floor;
}
feats[0] = energy;
}
}
// Mimics the functionality of process_window_kernel
// (found in feature-spectral-cuda.cu) for a chunk of data
// from several audio channels. The 2nd dimension
// (y) of the block grid gives the hardware "lane".
// The lanes array tells us which channel is in this lane,
// what current frame and sample are processed in this
// batch, etc.
__global__ void batched_process_window_kernel(
const LaneDesc *lanes, int32_t n_lanes, int32_t max_chunk_frames,
int frame_length, float dither, float energy_floor, bool remove_dc_offset,
float preemph_coeff, bool need_raw_log_energy, float *log_energy_pre_window,
int32_t lde, const float *windowing, float *tmp_windows, int32_t ldt,
float *windows, int32_t ldw) {
  // Specialize BlockReduce for type float
typedef cub::BlockReduce<float, CU1DBLOCK> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
int thread_id = threadIdx.x;
int row = blockIdx.x;
int lane = blockIdx.y;
LaneDesc desc = lanes[lane];
if (row >= desc.num_chunk_frames) return;
float *tmp_window = tmp_windows + row * ldt + lane * max_chunk_frames * ldt;
float *window = windows + row * ldw + lane * max_chunk_frames * ldw;
__shared__ float ssum;
float sum = 0;
float wdot = 0;
for (int idx = thread_id; idx < frame_length; idx += CU1DBLOCK) {
// tmp_window contains optional dither. Apply that on read.
float wval = window[idx];
if (dither != 0.0f) {
wval += tmp_window[idx] * dither;
}
// compute local sum for removing dc offset
sum += wval;
// compute dot product for log energy
wdot += wval * wval;
float windowing_mul = 1;
if (remove_dc_offset == false && preemph_coeff == 0.0f) {
// we are done here so set windowing multiplication on write.
windowing_mul = windowing[idx];
}
// write dithered output
window[idx] = wval * windowing_mul;
}
__syncthreads();
if (remove_dc_offset) {
// we will recompute this below
wdot = 0.0f;
// use cub to reduce
sum = BlockReduce(temp_storage).Sum(sum);
// broadcast sum to entire block
if (thread_id == 0) ssum = sum;
__syncthreads();
sum = -ssum / frame_length;
for (int idx = thread_id; idx < frame_length; idx += CU1DBLOCK) {
float windowing_mul = 1;
float *out = window;
if (preemph_coeff == 0.0f) {
// we are done here so apply windowing
windowing_mul = windowing[idx];
} else {
// write to temp window as we will copy back into window
// when doing pre-emphasis
out = tmp_window;
}
// updated window value
float wval = window[idx] + sum;
// compute new dot product with dc offset removed
wdot += wval * wval;
// write output
out[idx] = wval * windowing_mul;
}
}
__syncthreads();
// if pointer is not NULL we will set energy to either
// the computed energy or 0 depending on need_raw_log_energy
if (log_energy_pre_window != NULL) {
float energy = 0.0f;
if (need_raw_log_energy) {
      // must sync before reusing temp_storage
if (remove_dc_offset) __syncthreads();
// use cub to reduce
wdot = BlockReduce(temp_storage).Sum(wdot);
energy = max(wdot, energy_floor);
}
if (thread_id == 0) {
log_energy_pre_window[row + lane * lde] = log(energy);
}
}
// TODO this could be more efficient using shared memory instead of
// tmp_window.
if (preemph_coeff != 0.0f) {
// wait for tmp_window to be computed
__threadfence();
__syncthreads();
// starting thread idx at 0 to keep writes aligned.
    // unaligned reads are less painful than unaligned writes
for (int idx = thread_id; idx < frame_length; idx += CU1DBLOCK) {
float wval = tmp_window[idx];
float prev_window = wval;
if (idx > 0) {
prev_window = tmp_window[idx - 1];
}
// use __fmul_rn to match CPU
// window[idx] = (wval - preemph_coeff*prev_window) * windowing[idx];
window[idx] =
(wval - __fmul_rn(preemph_coeff, prev_window)) * windowing[idx];
}
}
}
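// Editorial note (added comment, hedged): the kernel above works in up to
// three passes over the frame -- (1) apply dither and accumulate the frame sum
// and raw energy, (2) if remove_dc_offset, subtract the mean and re-accumulate
// the energy, (3) if preemph_coeff != 0, apply pre-emphasis and the window
// function. When the DC offset is removed, pass (2) stages its result in
// tmp_window so that pass (3) can read the previous sample without racing with
// threads that have already overwritten window.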
__host__ __device__ inline int32 FirstSampleOfFrame(int32 frame,
int32 frame_shift,
int32 window_size,
bool snip_edges) {
if (snip_edges) {
return frame * frame_shift;
} else {
int32 midpoint_of_frame = frame_shift * frame + frame_shift / 2,
beginning_of_frame = midpoint_of_frame - window_size / 2;
return beginning_of_frame;
}
}
// Mimics the functionality of extract_window_kernel
// (found in feature-spectral-cuda.cu) for a chunk of data
// from several audio channels. The 2nd dimension
// (y) of the block grid gives the hardware "lane".
// The lanes array tells us which channel is in this lane,
// what current frame and sample are processed in this
// batch, etc.
// Extra samples not processed in this chunk are moved to
// "stash" where they'll be pre-pended to the next chunk
// from this channel
__global__ void batched_extract_window_kernel(
const LaneDesc *lanes, int32_t num_lanes, int32 frame_shift,
int32 frame_length, int32 frame_length_padded, bool snip_edges,
const BaseFloat *__restrict__ wave, int32_t ldw,
BaseFloat *__restrict__ windows, int32_t window_size, int32_t wlda,
BaseFloat *stash, int32_t ssize, int32_t lds) {
// local frame number
int32_t fidx = blockIdx.x;
int32_t tidx = threadIdx.x;
int32_t lane = blockIdx.y;
const LaneDesc desc = lanes[lane];
ChannelId channel = desc.channel;
// This is the current sample that is pointed to by wave
int32_t current_sample = desc.current_sample;
// current frame we are computing in global space
int32_t current_frame = desc.current_frame;
// global frame number computed by this block
int32_t global_frame = current_frame + fidx;
int32_t num_chunk_samples = desc.num_chunk_samples;
if (fidx >= desc.num_chunk_frames) return;
// offset input/output by channels or lanes
stash = stash + channel * lds;
wave = wave + lane * ldw;
BaseFloat *window = windows + fidx * wlda + gridDim.x * lane * wlda;
// This is the first sample needed to compute this frame
int32_t start_sample =
FirstSampleOfFrame(global_frame, frame_shift, window_size, snip_edges);
// Sample offset is how much we have to offset our index
// into the input wave.
int32_t wave_start = start_sample - current_sample;
// wave_start and wave_end are start and end indexes into 'wave', for the
// piece of wave that we're trying to extract.
int32_t wave_end = wave_start + frame_length;
// wave_start will be negative on successive chunks as we need
// to grab context from stash.
if ((current_frame > 0 || wave_start >= 0) && wave_end <= num_chunk_samples) {
// the normal case-- no edge effects to consider.
for (int i = tidx; i < frame_length; i += blockDim.x) {
int32_t widx = wave_start + i;
BaseFloat val;
if (widx >= 0) {
val = wave[widx];
} else {
// widx is negative. Add it to the stash size
// to get the correct index into the stash
int32_t sidx = ssize + widx;
val = stash[sidx];
}
window[i] = val;
}
} else {
// Deal with any end effects by reflection, if needed. This code will only
// be reached for about two frames per utterance, so we don't concern
// ourselves excessively with efficiency.
for (int s = tidx; s < frame_length; s += blockDim.x) {
int32 s_in_wave = wave_start + s;
while (s_in_wave < 0 || s_in_wave >= num_chunk_samples) {
// reflect around the beginning or end of the wave.
// e.g. -1 -> 0, -2 -> 1.
// dim -> dim - 1, dim + 1 -> dim - 2.
// the code supports repeated reflections, although this
// would only be needed in pathological cases.
if (s_in_wave < 0)
s_in_wave = -s_in_wave - 1;
else
s_in_wave = 2 * num_chunk_samples - 1 - s_in_wave;
}
window[s] = wave[s_in_wave];
}
}
if (frame_length_padded > frame_length) {
for (int i = frame_length + tidx; i < frame_length_padded;
i += blockDim.x) {
window[i] = 0.0f;
}
}
}
// For each frame
// compute logf(dot(signal_frame, signal_frame))
// This is the batched version. The y-dimension of the grid
// give the lane number
__global__ void batched_dot_log_kernel(int32_t max_chunk_frames,
int32_t frame_length,
float *signal_frame, int32_t lds,
float *signal_log_energy, int32_t lde) {
  // Specialize BlockReduce for type float
  typedef cub::BlockReduce<float, CU1DBLOCK> BlockReduce;
  // Allocate BlockReduce shared memory
__shared__ typename BlockReduce::TempStorage temp_storage;
int32_t frame = blockIdx.x;
int32_t tid = threadIdx.x;
int32_t lane = blockIdx.y;
float *in = signal_frame + frame * lds + max_chunk_frames * lane * lds;
float sum = 0;
  // perform local dot product
for (int32_t i = tid; i < frame_length; i += blockDim.x) {
float val = in[i];
sum += val * val;
}
// reduce using cub
sum = BlockReduce(temp_storage).Sum(sum);
if (threadIdx.x == 0) {
signal_log_energy[frame + lane * lde] = logf(sum);
}
}
__global__ void batched_update_stash_kernel(const LaneDesc *lanes,
int32_t num_lanes,
const BaseFloat *wave, int32_t ldw,
BaseFloat *stash, int32_t num_stash,
int32_t lds) {
int32_t lane = blockIdx.x;
LaneDesc desc = lanes[lane];
int32_t channel = desc.channel;
int32_t num_chunk_samples = desc.num_chunk_samples;
// offset memory by lane or channel
wave = wave + lane * ldw;
stash = stash + channel * lds;
int32_t sample_offset = num_chunk_samples - num_stash;
for (int i = threadIdx.x; i < num_stash; i += blockDim.x) {
int32_t idx = sample_offset + i;
float val;
if (idx < 0) {
// data must come from old stash
val = stash[idx + num_stash];
} else {
// data comes from new wave
val = wave[idx];
}
__syncthreads();
stash[i] = val;
}
}
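// Editorial note (added comment, hedged): this kernel keeps a rolling context
// of the last num_stash samples per channel. sample_offset points num_stash
// samples before the end of the chunk; indices that fall before the chunk
// (idx < 0, i.e. a chunk shorter than the stash) are copied from the previous
// stash contents, everything else from the freshly arrived wave.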
// Each threadblock computes a different row of the matrix.
// Threads in the same block compute the row collaboratively.
// This kernel must be called out of place (A_in!=A_out).
__global__ void power_spectrum_kernel(int row_length, const float *A_in, int32_t ldi,
float *A_out, int32_t ldo,
bool use_power) {
int thread_id = threadIdx.x;
int block_id = blockIdx.x;
const float *Ar = A_in + block_id * ldi;
float *Aw = A_out + block_id * ldo;
int half_length = row_length / 2;
for (int idx = thread_id; idx < half_length; idx += CU1DBLOCK) {
// ignore special case
if (idx == 0) continue;
float2 val = reinterpret_cast<const float2 *>(Ar)[idx];
float ret = val.x * val.x + val.y * val.y;
if (use_power) {
Aw[idx] = ret;
} else {
Aw[idx] = sqrtf(ret);
}
}
// handle special case
if (threadIdx.x == 0) {
float real = Ar[0];
    // cuFFT puts this term (the Nyquist component) at the end of the row,
    // which differs from Kaldi's own internal FFT implementation
float im = Ar[row_length];
if (use_power) {
Aw[0] = real * real;
Aw[half_length] = im * im;
} else {
Aw[0] = fabs(real);
Aw[half_length] = fabs(im);
}
}
}
void cuda_power_spectrum(int32_t max_chunk_frames, int32_t num_lanes,
int row_length, const float *A_in, int32_t ldi,
float *A_out, int32_t ldo, bool use_power) {
power_spectrum_kernel<<<max_chunk_frames * num_lanes, CU1DBLOCK>>>(
row_length, A_in, ldi, A_out, ldo, use_power);
}
void cuda_mel_banks_compute(const LaneDesc *lanes, int32_t num_lanes,
int32_t max_chunk_frames, int32_t num_bins,
float energy_floor, int32 *offsets, int32 *sizes,
float **vecs, const float *feats, int32_t ldf,
float *mels, int32_t ldm, bool use_log) {
dim3 Bl(32, 8);
dim3 Gr(num_bins, (max_chunk_frames + Bl.y - 1) / Bl.y, num_lanes);
batched_mel_banks_compute_kernel<<<Gr, Bl>>>(
lanes, num_lanes, max_chunk_frames, energy_floor, offsets, sizes, vecs,
feats, ldf, mels, ldm, use_log);
}
void cuda_apply_lifter_and_floor_energy(const LaneDesc *lanes,
int32_t num_lanes,
int32_t max_chunk_frames, int num_cols,
float cepstral_lifter, bool use_energy,
float energy_floor, float *log_energy,
int32_t ldl, float *lifter_coeffs,
float *features, int32_t ldf) {
dim3 Gr(max_chunk_frames, num_lanes);
batched_apply_lifter_and_floor_energy_kernel<<<Gr, CU1DBLOCK>>>(
lanes, num_lanes, max_chunk_frames, num_cols, cepstral_lifter, use_energy,
energy_floor, log_energy, ldl, lifter_coeffs, features, ldf);
}
void cuda_process_window(const LaneDesc *lanes, int32_t num_lanes,
int32_t max_chunk_frames, int frame_length,
float dither, float energy_floor,
bool remove_dc_offset, float preemph_coeff,
bool need_raw_log_energy, float *log_energy_pre_window,
int32_t lde, const float *windowing,
float *tmp_windows, int32_t ldt, float *windows,
int32_t ldw) {
dim3 Gr(max_chunk_frames, num_lanes);
int Bl = CU1DBLOCK;
batched_process_window_kernel<<<Gr, Bl>>>(
lanes, num_lanes, max_chunk_frames, frame_length, dither, energy_floor,
remove_dc_offset, preemph_coeff, need_raw_log_energy,
log_energy_pre_window, lde, windowing, tmp_windows, ldt, windows, ldw);
}
void cuda_extract_window(const LaneDesc *lanes, int32_t num_lanes,
int32_t max_chunk_frames, int32 frame_shift,
int32 frame_length, int32 frame_length_padded,
bool snip_edges, const float *wave, int32_t ldw,
float *windows, int32_t window_size, int32_t wlda,
BaseFloat *stash, int32_t ssize, int32_t lds) {
dim3 Gr(max_chunk_frames, num_lanes);
int Bl = CU1DBLOCK;
batched_extract_window_kernel<<<Gr, Bl>>>(
lanes, num_lanes, frame_shift, frame_length, frame_length_padded,
snip_edges, wave, ldw, windows, window_size, wlda, stash, ssize, lds);
}
void cuda_dot_log(int32_t max_chunk_frames, int32_t num_lanes,
int32_t frame_length, float *signal_frame, int32_t lds,
float *signal_log_energy, int32_t lde) {
dim3 Gr(max_chunk_frames, num_lanes);
batched_dot_log_kernel<<<Gr, CU1DBLOCK>>>(max_chunk_frames, frame_length,
signal_frame, lds,
signal_log_energy, lde);
}
void cuda_update_stash(const LaneDesc *lanes, int32_t num_lanes,
const BaseFloat *wave, int32_t ldw, BaseFloat *stash,
int32_t num_stash, int32_t lds) {
int Gr = num_lanes;
int Bl = 1024;
batched_update_stash_kernel<<<Gr, Bl>>>(lanes, num_lanes, wave, ldw, stash,
num_stash, lds);
}
} // namespace kaldi
|
d0ab1a386c7e54ae5b936374cbf06df282d11f10.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <yoga_wfs.h>
#include <yoga_ao_utils.h>
// Utility class used to avoid linker errors with extern
// unsized shared memory arrays with templated type
template<class T> struct SharedMemory
{
__device__ inline operator T*()
{
extern __shared__ int __smem[];
return (T*)__smem;
}
__device__ inline operator const T*() const
{
extern __shared__ int __smem[];
return (T*)__smem;
}
};
// specialize for double to avoid unaligned memory
// access compile errors
template<> struct SharedMemory<double>
{
__device__ inline operator double*()
{
extern __shared__ double __smem_d[];
return (double*)__smem_d;
}
__device__ inline operator const double*() const
{
extern __shared__ double __smem_d[];
return (double*)__smem_d;
}
};
bool isPow2(unsigned int x)
{
return ((x&(x-1))==0);
}
__global__ void camplipup_krnl(cuFloatComplex *amplipup, float *phase,float *offset, float *mask, int *istart,
int *jstart, int *ivalid, int *jvalid, int nphase, int nphase2, int npup, int Nfft, int N)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < N) {
int nim = tid / nphase2;
int idim = tid - nim * nphase2;
int idimx = idim % nphase; // nphase : size of the phase support in subaps
int idimy = idim / nphase;
int idphase = idimx + idimy * npup + istart[ivalid[nim]] + jstart[jvalid[nim]] * npup;
// npup : size of the input phase screen
int idx = idimx + idimy * Nfft + nim * Nfft * Nfft;
amplipup[idx].x = (cosf(phase[idphase]-offset[idim]))*mask[idphase];
amplipup[idx].y = (sinf(phase[idphase]-offset[idim]))*mask[idphase];
tid += blockDim.x * gridDim.x;
}
}
int fillcamplipup2(cuFloatComplex *amplipup, float *phase, float *offset, float *mask, int *istart, int *jstart,
int *ivalid, int *jvalid, int nphase, int npup, int Nfft, int Ntot, int device)
// here amplipup is a cube of data of size nfft x nfft x nsubap
// phase is an array of size pupdiam x pupdiam
// offset is an array of size pdiam x pdiam
// mask is an array of size pupdiam x pupdiam
// number of threads required : pdiam x pdiam x nsubap
{
struct hipDeviceProp_t deviceProperties;
hipGetDeviceProperties(&deviceProperties, device);
int maxThreads = deviceProperties.maxThreadsPerBlock;
int nBlocks = deviceProperties.multiProcessorCount*8;
int nThreads = (Ntot + nBlocks -1)/nBlocks;
if (nThreads > maxThreads) {
nThreads = maxThreads;
nBlocks = (Ntot + nThreads -1)/nThreads;
}
dim3 grid(nBlocks), threads(nThreads);
int nphase2 = nphase * nphase;
hipLaunchKernelGGL(( camplipup_krnl), dim3(grid), dim3(threads), 0, 0, amplipup,phase,offset,mask,istart,jstart,ivalid,jvalid,nphase,nphase2,npup,Nfft,Ntot);
cutilCheckMsg("fillcamplipup_kernel<<<>>> execution failed\n");
return EXIT_SUCCESS;
}
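/*
   Illustrative sketch (not part of the original yoga_wfs source): every wrapper
   in this file sizes its launch the same way -- 8 blocks per multiprocessor,
   threads = ceil(N / nBlocks), capped at maxThreadsPerBlock with the block
   count recomputed. The helper below only restates that heuristic; its name
   and the idea of factoring it out are assumptions, not existing code.
*/
static inline void sketch_launch_config(int N, int device, int &nBlocks, int &nThreads)
{
  hipDeviceProp_t prop;
  hipGetDeviceProperties(&prop, device);
  nBlocks = prop.multiProcessorCount * 8;        // 8 blocks per SM, as above
  nThreads = (N + nBlocks - 1) / nBlocks;        // ceil(N / nBlocks)
  if (nThreads > prop.maxThreadsPerBlock) {      // cap, then recompute blocks
    nThreads = prop.maxThreadsPerBlock;
    nBlocks = (N + nThreads - 1) / nThreads;
  }
}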
__global__ void bimg_krnl(float *bimage, float *bcube, int npix, int npix2, int nsub, int *ivalid, int *jvalid, float alpha, int N)
{
/*
bcube holds the npix x npix image of each subaperture; this kernel writes it
into the full binned image bimage at the subaperture position given by
ivalid/jvalid (alpha = 1 accumulates, alpha = 0 overwrites)
npix2 = npix x npix
*/
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < N) {
int nim = tid / npix2;
int tidim = tid - nim * npix2;
int xim = tidim % npix;
int yim = tidim / npix;
int idbin = xim + yim * nsub + ivalid[nim] * npix + jvalid[nim] * npix * nsub;
bimage[idbin] = alpha * bimage[idbin] + bcube[tid];
tid += blockDim.x * gridDim.x;
}
}
int fillbinimg(float *bimage, float *bcube, int npix, int nsub, int Nsub, int *ivalid, int *jvalid, bool add, int device)
{
struct hipDeviceProp_t deviceProperties;
hipGetDeviceProperties(&deviceProperties, device);
int Npix = npix * npix;
int N = Npix * nsub;
int maxThreads = deviceProperties.maxThreadsPerBlock;
int nBlocks = deviceProperties.multiProcessorCount*8;
int nThreads = (N + nBlocks -1)/nBlocks;
if (nThreads > maxThreads) {
nThreads = maxThreads;
nBlocks = (N + nThreads -1)/nThreads;
}
dim3 grid(nBlocks), threads(nThreads);
// dim3 grid(128), threads(128);
float alpha;
if (add) alpha = 1.0f;
else alpha = 0.0f;
hipLaunchKernelGGL(( bimg_krnl), dim3(grid), dim3(threads), 0, 0, bimage,bcube,npix,Npix,Nsub,ivalid,jvalid,alpha,N);
cutilCheckMsg("binimg_kernel<<<>>> execution failed\n");
return EXIT_SUCCESS;
}
__global__ void fillbincube_krnl(float *bcube, float *hrimage, int *indxpix, int Nfft, int Npix, int Nrebin, int N)
{
/*
indx is an array nrebin^2 * npix^2
it gives the nrebin x nrebin pixels in the hrimage per npix x npix pixels of the subap
Npix = npix x npix
*/
int npix,nsubap,nrebin;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < N) {
nsubap = tid / Npix;
npix = tid % Npix;
for (int i=0;i<Nrebin;i++) {
nrebin = indxpix[i+npix*Nrebin];
bcube[tid] += hrimage[nrebin + Nfft*nsubap];
}
tid += blockDim.x * gridDim.x;
}
}
int fillbincube(float *bcube, float *hrimage, int *indxpix, int Nfft, int Npix, int Nrebin, int Nsub, int device)
{
struct hipDeviceProp_t deviceProperties;
hipGetDeviceProperties(&deviceProperties, device);
int N = Npix * Nsub;
int maxThreads = deviceProperties.maxThreadsPerBlock;
int nBlocks = deviceProperties.multiProcessorCount*8;
int nThreads = (N + nBlocks -1)/nBlocks;
if (nThreads > maxThreads) {
nThreads = maxThreads;
nBlocks = (N + nThreads -1)/nThreads;
}
dim3 grid(nBlocks), threads(nThreads);
// dim3 grid(128), threads(128);
hipLaunchKernelGGL(( fillbincube_krnl), dim3(grid), dim3(threads), 0, 0, bcube,hrimage,indxpix,Nfft,Npix,Nrebin,N);
cutilCheckMsg("fillbincube_kernel<<<>>> execution failed\n");
return EXIT_SUCCESS;
}
template<class T> __global__ void indexfill_krnl(T *odata, T *idata, int *indx,int ntot, int Ntot, int N)
{
int nim, idim;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < N) {
nim = tid / ntot;
idim = tid - (nim * ntot);
odata[indx[idim] + (nim * Ntot)] = idata[tid];
tid += blockDim.x * gridDim.x;
}
}
int indexfill(float *d_odata,float *d_idata,int *indx,int nx, int Nx, int N,int device)
{
int ntot = nx * nx;
int Ntot = Nx * Nx;
struct hipDeviceProp_t deviceProperties;
hipGetDeviceProperties(&deviceProperties, device);
int maxThreads = deviceProperties.maxThreadsPerBlock;
int nBlocks = deviceProperties.multiProcessorCount*8;
int nThreads = (N + nBlocks -1)/nBlocks;
if (nThreads > maxThreads) {
nThreads = maxThreads;
nBlocks = (N + nThreads -1)/nThreads;
}
dim3 grid(nBlocks), threads(nThreads);
hipLaunchKernelGGL(( indexfill_krnl), dim3(grid), dim3(threads), 0, 0, d_odata, d_idata,indx, ntot, Ntot, N);
return EXIT_SUCCESS;
}
__global__ void conv_krnl(cuFloatComplex *odata,cuFloatComplex *idata, int N)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
cuFloatComplex tmp;
while (tid < N) {
tmp.x = idata[tid].x*odata[tid].x-idata[tid].y*odata[tid].y;
tmp.y = idata[tid].y*odata[tid].x+idata[tid].x*odata[tid].y;
odata[tid] = tmp;
tid += blockDim.x * gridDim.x;
}
}
int convolve(cuFloatComplex *d_odata,cuFloatComplex *d_idata,int N,int device)
{
struct hipDeviceProp_t deviceProperties;
hipGetDeviceProperties(&deviceProperties, device);
int maxThreads = deviceProperties.maxThreadsPerBlock;
int nBlocks = deviceProperties.multiProcessorCount*8;
int nThreads = (N + nBlocks -1)/nBlocks;
if (nThreads > maxThreads) {
nThreads = maxThreads;
nBlocks = (N + nThreads -1)/nThreads;
}
dim3 grid(nBlocks), threads(nThreads);
// dim3 grid(128), threads(128);
hipLaunchKernelGGL(( conv_krnl), dim3(grid), dim3(threads), 0, 0, d_odata, d_idata, N);
return EXIT_SUCCESS;
}
/*
This version uses sequential addressing -- no divergence or bank conflicts.
*/
template <class T> __device__ void reduce_krnl(T *sdata, int size, int n)
{
if (!((size&(size-1))==0)) {
unsigned int s;
if (size %2 != 0) s = size/2+1;
else s = size/2;
unsigned int s_old = size;
while (s>0) {
if ((n < s) && (n + s < s_old)) {
sdata[n] += sdata[n + s];
}
__syncthreads();
s_old = s;
s /= 2;
if ((2*s < s_old) && (s!=0)) s += 1;
}
} else {
// do reduction in shared mem
for(unsigned int s=size/2; s>0; s>>=1) {
if (n < s) {
sdata[n] += sdata[n + s];
}
__syncthreads();
}
}
}
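/*
   Illustrative sketch (not in the original source): a serial version of the
   pairing schedule reduce_krnl uses for non power-of-two sizes, handy as a
   host-side reference when checking the kernel. The function name is an
   assumption; nothing in this file calls it.
*/
template <class T> static T sketch_reduce_reference(T *data, int size)
{
  int s_old = size;
  int s = (size % 2 != 0) ? size / 2 + 1 : size / 2;  // s = ceil(size / 2)
  while (s > 0) {
    for (int n = 0; n < s; n++)
      if (n + s < s_old) data[n] += data[n + s];      // same pairing as the kernel
    s_old = s;
    s /= 2;
    if ((2 * s < s_old) && (s != 0)) s += 1;          // keep s = ceil(s_old / 2)
  }
  return data[0];
}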
template <class T> __device__ void scanmax_krnl(T *sdata, int *values,int size, int n)
{
if (!((size&(size-1))==0)) {
unsigned int s;
if (size %2 != 0) s = size/2+1;
else s = size/2;
unsigned int s_old = size;
while (s>0) {
if ((n < s) && (n + s < s_old)) {
if (sdata[n] < sdata[n + s]) {
values[n] = n + s;
sdata[n] = sdata[n+s];
}
}
__syncthreads();
s_old = s;
s /= 2;
if ((2*s < s_old) && (s!=0)) s += 1;
}
} else {
// do reduction in shared mem
for(unsigned int s=size/2; s>0; s>>=1) {
if (n < s) {
if (sdata[n] < sdata[n + s]) {
values[n] = n + s;
sdata[n] = sdata[n+s];
}
}
__syncthreads();
}
}
}
template <class T>
__device__ inline void mswap(T & a, T & b)
{
T tmp = a;
a = b;
b = tmp;
}
template <class T> __device__ inline void sortmax_krnl(T *sdata, unsigned int *values,int size, int n)
{
if (!((size&(size-1))==0)) {
unsigned int s;
if (size %2 != 0) s = size/2+1;
else s = size/2;
unsigned int s_old = size;
while (s>0) {
if ((n < s) && (n + s < s_old)) {
if (sdata[n] < sdata[n + s]) {
mswap(values[n],values[n+s]);
mswap(sdata[n],sdata[n+s]);
}
}
__syncthreads();
s_old = s;
s /= 2;
if ((2*s < s_old) && (s!=0)) s += 1;
}
} else {
// do reduction in shared mem
for(unsigned int s=size/2; s>0; s>>=1) {
if (n < s) {
if (sdata[n] < sdata[n + s]) {
mswap(values[n],values[n+s]);
mswap(sdata[n],sdata[n+s]);
}
}
__syncthreads();
}
}
}
template <class T> __global__ void reduce2(T *g_idata, T *g_odata, unsigned int n)
{
T *sdata = SharedMemory<T>();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < n) ? g_idata[i] : 0;
__syncthreads();
reduce_krnl(sdata,blockDim.x,tid);
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
template <class T> __global__ void reduce2(T *g_idata, T *g_odata, T thresh, unsigned int n)
{
T *sdata = SharedMemory<T>();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) sdata[tid] = (g_idata[i] > thresh) ? g_idata[i] : 0;
__syncthreads();
reduce_krnl(sdata,blockDim.x,tid);
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
template <class T> __global__ void sortmax(T *g_idata, T *g_odata, int *values, int nmax)
{
extern __shared__ uint svalues[];
T *sdata = (T*)&svalues[blockDim.x];
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
svalues[tid] = tid;
sdata[tid] = g_idata[i];
__syncthreads();
for (int cc=0;cc<nmax;cc++) {
if (tid >= cc) sortmax_krnl(&(sdata[cc]),&(svalues[cc]),blockDim.x-cc,tid-cc);
__syncthreads();
}
if (tid < nmax) {
g_odata[nmax*blockIdx.x+tid] = sdata[tid];
values[nmax*blockIdx.x+tid] = svalues[tid];
}
__syncthreads();
}
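/*
   Illustrative sketch (not in the original source): the nmax passes of
   sortmax_krnl leave the nmax brightest values of the block's tile in
   descending order in sdata[0..nmax-1], with their positions inside the tile
   in svalues[0..nmax-1]. The serial equivalent below, written as nmax
   selection passes, can serve as a reference; its name is an assumption.
*/
template <class T>
static void sketch_sortmax_reference(const T *tile, int tile_size, int nmax,
                                     T *vals, unsigned int *pos)
{
  // nmax selection passes over a scratch copy, mirroring the nmax kernel passes
  T *work = new T[tile_size];
  unsigned int *idx = new unsigned int[tile_size];
  for (int i = 0; i < tile_size; i++) { work[i] = tile[i]; idx[i] = (unsigned int)i; }
  for (int cc = 0; cc < nmax; cc++) {
    int best = cc;
    for (int j = cc + 1; j < tile_size; j++)
      if (work[j] > work[best]) best = j;
    T tv = work[cc]; work[cc] = work[best]; work[best] = tv;
    unsigned int ti = idx[cc]; idx[cc] = idx[best]; idx[best] = ti;
    vals[cc] = work[cc];   // brightest values, descending
    pos[cc] = idx[cc];     // their positions inside the tile
  }
  delete[] work;
  delete[] idx;
}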
template <class T> __global__ void centroid_max(T *g_idata, T *g_odata, int n, int nmax, int nsub)
{
extern __shared__ uint svalues[];
T *sdata = (T*)&svalues[blockDim.x];
T subsum;
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
svalues[tid] = tid;
sdata[tid] = g_idata[i];
__syncthreads();
for (int cc=0;cc<nmax;cc++) {
if (tid >= cc) sortmax_krnl(&(sdata[cc]),&(svalues[cc]),blockDim.x-cc,tid-cc);
__syncthreads();
}
// at this point the nmax first elements of sdata are the nmax brightest
// pixels and the nmax first elements of svalues are their positions
// first copy the brightest values out for reduction
if ((tid >= nmax) && (tid < 2*nmax)) sdata[tid] = sdata[tid-nmax];
__syncthreads();
reduce_krnl(sdata,nmax,tid);
// get the sum per subap
if (tid == 0) subsum = sdata[tid];
// put back the brightest pixels values
if ((tid >= nmax) && (tid < 2*nmax)) sdata[tid-nmax] = sdata[tid];
__syncthreads();
// compute the centroid on the first part of the array
if (tid < nmax) sdata[tid] *= ((svalues[tid] % n) + 1);
// x centroid
__syncthreads();
reduce_krnl(sdata,nmax,tid);
if (tid == 0) g_odata[blockIdx.x] = sdata[tid]/subsum;
// put back the brightest pixels values
if ((tid >= nmax) && (tid < 2*nmax)) sdata[tid-nmax] = sdata[tid];
__syncthreads();
// compute the centroid on the first part of the array
if (tid < nmax) sdata[tid] *= (svalues[tid] / n + 1);
// y centroid
__syncthreads();
reduce_krnl(sdata,nmax,tid);
if (tid == 0) g_odata[blockIdx.x+nsub] = sdata[tid]/subsum;
}
template <class T> void subap_reduce(int size, int threads, int blocks, T *d_idata, T *d_odata)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize = (threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T);
hipLaunchKernelGGL(( reduce2<T>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
cutilCheckMsg("reduce_kernel<<<>>> execution failed\n");
}
template void subap_reduce<float>(int size, int threads, int blocks, float *d_idata, float *d_odata);
template void subap_reduce<double>(int size, int threads, int blocks, double *d_idata, double *d_odata);
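/*
   Illustrative usage sketch (not in the original source): subap_reduce produces
   one sum per block, so a typical call uses one block per subaperture and one
   thread per pixel of its npix x npix image; the variable names below are
   assumptions. Note that the thresholded reduce2 overload above only writes
   sdata[tid] when i < n, so it relies on size being exactly threads * blocks,
   which this layout guarantees.
*/
static void sketch_subap_flux(float *d_bincube, float *d_subsum, int npix, int nvalid)
{
  int threads = npix * npix;   // one thread per pixel of a subaperture image
  int blocks = nvalid;         // one block per valid subaperture
  subap_reduce<float>(threads * blocks, threads, blocks, d_bincube, d_subsum);
}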
template <class T> void subap_reduce(int size, int threads, int blocks, T *d_idata, T *d_odata, T thresh)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize = (threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T);
hipLaunchKernelGGL(( reduce2<T>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, thresh, size);
cutilCheckMsg("reduce_kernel<<<>>> execution failed\n");
}
template void subap_reduce<float>(int size, int threads, int blocks, float *d_idata, float *d_odata, float thresh);
template void subap_reduce<double>(int size, int threads, int blocks, double *d_idata, double *d_odata, double thresh);
template <class T> void subap_sortmax(int size, int threads, int blocks, T *d_idata, T *d_odata, int *values, int nmax)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize = (threads <= 32) ? 2 * threads * (sizeof(T) + sizeof(uint)) : threads * (sizeof(T) + sizeof(uint));
hipLaunchKernelGGL(( sortmax<T>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, values, nmax);
cutilCheckMsg("sortmax_kernel<<<>>> execution failed\n");
}
template void subap_sortmax<float>(int size, int threads, int blocks, float *d_idata, float *d_odata, int *values, int nmax);
template void subap_sortmax<double>(int size, int threads, int blocks, double *d_idata, double *d_odata, int *values, int nmax);
template <class T> void subap_centromax(int threads, int blocks, T *d_idata, T *d_odata, int npix, int nmax)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize = (threads <= 32) ? 2 * threads * (sizeof(T) + sizeof(uint)) : threads * (sizeof(T) + sizeof(uint));
hipLaunchKernelGGL(( centroid_max<T>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata,npix, nmax,blocks);
cutilCheckMsg("sortmax_kernel<<<>>> execution failed\n");
}
template void subap_centromax<float>( int threads, int blocks, float *d_idata, float *d_odata, int npix, int nmax);
template void subap_centromax<double>(int threads, int blocks, double *d_idata, double *d_odata, int npix, int nmax);
template <class T> __global__ void reduce_phasex(T *g_idata, T *g_odata, int *indx, unsigned int n, T alpha)
{
T *sdata = SharedMemory<T>();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * n * n;
sdata[tid] = g_idata[indx[i+tid*n+n-1]]-g_idata[indx[i+tid*n]];
__syncthreads();
reduce_krnl(sdata,blockDim.x,tid);
// write result for this block to global mem
if (tid == 0) {
g_odata[blockIdx.x] = sdata[0] / n * alpha;
}
}
template <class T> __global__ void reduce_phasey(T *g_idata, T *g_odata, int *indx, unsigned int n, T alpha)
// FIXME
// full parallelization would require using 4x as many threads
// instead of doing 4 operations per thread
{
T *sdata = SharedMemory<T>();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * n * n + threadIdx.x;
sdata[tid] = g_idata[indx[i+(n-1)*n]]-g_idata[indx[i]];
__syncthreads();
reduce_krnl(sdata,blockDim.x,tid);
// write result for this block to global mem
if (tid == 0) {
g_odata[blockIdx.x] = sdata[0]/n*alpha;
}
}
template <class T> void phase_reduce(int threads, int blocks, T *d_idata, T *d_odata, int *indx, T alpha)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize = (threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T);
hipLaunchKernelGGL(( reduce_phasex<T>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, indx, threads, alpha);
cutilCheckMsg("reduce_phasex_kernel<<<>>> execution failed\n");
hipLaunchKernelGGL(( reduce_phasey<T>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, &(d_odata[blocks]), indx, threads, alpha);
cutilCheckMsg("reduce_phasey_kernel<<<>>> execution failed\n");
}
template void phase_reduce<float>(int threads, int blocks, float *d_idata, float *d_odata, int *indx, float alpha);
template void phase_reduce<double>(int threads, int blocks, double *d_idata, double *d_odata, int *indx, double alpha);
template <class T> __global__ void derive_phasex(T *g_idata, T *g_odata, int *indx, T *mask, T alpha, unsigned int n, unsigned int N, float *fluxPerSub)
{
T *sdata = SharedMemory<T>();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < N) {
if (tid % n == 0) {//start of a column
sdata[tid] = (g_idata[indx[i+1]] - g_idata[indx[i]]) * mask[indx[i]];
} else {
if ((tid+1) % n == 0) {//end of a column
sdata[tid] = (g_idata[indx[i]] - g_idata[indx[i-1]]) * mask[indx[i]];
} else
sdata[tid] = (g_idata[indx[i+1]] - g_idata[indx[i-1]]) * mask[indx[i]];
}
}
__syncthreads();
reduce_krnl(sdata,blockDim.x,tid);
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0] / n / 2 * alpha / fluxPerSub[blockIdx.x];
}
template <class T> __global__ void derive_phasey(T *g_idata, T *g_odata, int *indx, T *mask, T alpha, unsigned int n, unsigned int N, float *fluxPerSub)
{
T *sdata = SharedMemory<T>();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < N) {
if (tid < n) {//start of a column
sdata[tid] = (g_idata[indx[i+n]] - g_idata[indx[i]]) * mask[indx[i]];
} else {
if (tid >= n*(n-1)) {//end of a column
sdata[tid] = (g_idata[indx[i]] - g_idata[indx[i-n]]) * mask[indx[i]];
} else
sdata[tid] = (g_idata[indx[i+n]] - g_idata[indx[i-n]]) * mask[indx[i]];
}
}
__syncthreads();
reduce_krnl(sdata,blockDim.x,tid);
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0] / n / 2 * alpha / fluxPerSub[blockIdx.x];
}
template <class T> void phase_derive(int size, int threads, int blocks, int n, T *d_idata, T *d_odata, int *indx, T *mask, T alpha, float *fluxPerSub)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize = (threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T);
hipLaunchKernelGGL(( derive_phasex<T>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, indx, mask, alpha, n, size,fluxPerSub);
cutilCheckMsg("phase_derivex_kernel<<<>>> execution failed\n");
hipLaunchKernelGGL(( derive_phasey<T>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, &(d_odata[blocks]), indx, mask, alpha, n, size,fluxPerSub);
cutilCheckMsg("phase_derivey_kernel<<<>>> execution failed\n");
}
template void phase_derive<float>(int size, int threads, int blocks, int n, float *d_idata, float *d_odata, int *indx, float *mask,float alpha, float *fluxPerSub);
template void phase_derive<double>(int size, int threads, int blocks, int n, double *d_idata, double *d_odata, int *indx, double *mask,double alpha, float *fluxPerSub);
template <class T> __global__ void centroidx(T *g_idata, T *g_odata, T *alpha, unsigned int n, unsigned int N)
{
T *sdata = SharedMemory<T>();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < N) ? g_idata[i] * ((tid % n) +1) : 0;
__syncthreads();
reduce_krnl(sdata,blockDim.x,tid);
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0]*1.0/(alpha[blockIdx.x]+1.e-6);
}
template <class T> __global__ void centroidy(T *g_idata, T *g_odata, T *alpha, unsigned int n, unsigned int N)
{
T *sdata = SharedMemory<T>();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < N) ? g_idata[i] * ((tid / n) +1) : 0;
__syncthreads();
reduce_krnl(sdata,blockDim.x,tid);
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0]*1.0/(alpha[blockIdx.x]+1.e-6);
}
template <class T> void get_centroids(int size, int threads, int blocks, int n, T *d_idata, T *d_odata, T *alpha)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize = (threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T);
hipLaunchKernelGGL(( centroidx<T>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, alpha, n, size);
cutilCheckMsg("centroidx_kernel<<<>>> execution failed\n");
hipLaunchKernelGGL(( centroidy<T>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, &(d_odata[blocks]), alpha, n, size);
cutilCheckMsg("centroidy_kernel<<<>>> execution failed\n");
}
template void get_centroids<float>(int size, int threads, int blocks, int n,float *d_idata, float *d_odata,float *alpha);
template void get_centroids<double>(int size, int threads, int blocks, int n,double *d_idata, double *d_odata, double *alpha);
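/*
   Illustrative usage sketch (not in the original source): centroidx/centroidy
   weight each pixel by its 1-based column ((tid % n) + 1) or row ((tid / n) + 1)
   and normalise by the per-subaperture flux passed as alpha, so d_odata receives
   x centroids in [0 .. blocks-1] and y centroids in [blocks .. 2*blocks-1], in
   pixel units with a 1-based origin. Variable names are assumptions; d_slopes
   must hold 2 * nvalid values.
*/
static void sketch_cog_slopes(float *d_bincube, float *d_subsum, float *d_slopes,
                              int npix, int nvalid)
{
  int threads = npix * npix;
  int blocks = nvalid;
  subap_reduce<float>(threads * blocks, threads, blocks, d_bincube, d_subsum);
  get_centroids<float>(threads * blocks, threads, blocks, npix, d_bincube, d_slopes,
                       d_subsum);
}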
template <class T> __global__ void centroidx(T *g_idata, T *g_odata, T *alpha, T thresh, unsigned int n, unsigned int N)
{
T *sdata = SharedMemory<T>();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < N) sdata[tid] = (g_idata[i] > thresh) ? g_idata[i] * ((tid % n) +1) : 0;
__syncthreads();
reduce_krnl(sdata,blockDim.x,tid);
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0]*1.0/(alpha[blockIdx.x]+1.e-6);
}
template <class T> __global__ void centroidy(T *g_idata, T *g_odata, T *alpha, T thresh, unsigned int n, unsigned int N)
{
T *sdata = SharedMemory<T>();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < N) sdata[tid] = (g_idata[i] > thresh) ? g_idata[i] * ((tid / n) +1) : 0;
__syncthreads();
reduce_krnl(sdata,blockDim.x,tid);
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0]*1.0/(alpha[blockIdx.x]+1.e-6);
}
template <class T> void get_centroids(int size, int threads, int blocks, int n, T *d_idata, T *d_odata, T *alpha, T thresh)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize = (threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T);
hipLaunchKernelGGL(( centroidx<T>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, alpha, thresh, n, size);
cutilCheckMsg("centroidx_kernel<<<>>> execution failed\n");
hipLaunchKernelGGL(( centroidy<T>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, &(d_odata[blocks]), alpha, thresh, n, size);
cutilCheckMsg("centroidy_kernel<<<>>> execution failed\n");
}
template void get_centroids<float>(int size, int threads, int blocks, int n,float *d_idata, float *d_odata,float *alpha, float thresh);
template void get_centroids<double>(int size, int threads, int blocks, int n,double *d_idata, double *d_odata, double *alpha, double thresh);
__global__ void fillcorr_krnl(cuFloatComplex *d_out, float *d_in,int npix_in,int npix_out,int N)
{
int nim,npix,idx;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < N) {
nim = tid / npix_in;
npix = tid % npix_in;
idx = nim * npix_out + npix;
d_out[idx].x = d_in[tid];
d_out[idx].y = 0.0;
tid += blockDim.x * gridDim.x;
}
}
int fill_corr(cuFloatComplex *d_out, float *d_in, int npix_in, int npix_out, int N, int device)
{
struct hipDeviceProp_t deviceProperties;
hipGetDeviceProperties(&deviceProperties, device);
int maxThreads = deviceProperties.maxThreadsPerBlock;
int nBlocks = deviceProperties.multiProcessorCount*8;
int nThreads = (N + nBlocks -1)/nBlocks;
if (nThreads > maxThreads) {
nThreads = maxThreads;
nBlocks = (N + nThreads -1)/nThreads;
}
dim3 grid(nBlocks), threads(nThreads);
hipLaunchKernelGGL(( fillcorr_krnl), dim3(grid), dim3(threads), 0, 0, d_out,d_in,npix_in,npix_out,N);
cutilCheckMsg("fillcorr_kernel<<<>>> execution failed\n");
return EXIT_SUCCESS;
}
__global__ void fillval_krnl(cuFloatComplex *d_out, float val,int npix_in,int npix_out,int N)
{
int nim,npix,idx;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < N) {
nim = tid / npix_in;
npix = tid % npix_in;
idx = nim * npix_out + npix;
d_out[idx].x = val;
d_out[idx].y = 0.0;
tid += blockDim.x * gridDim.x;
}
}
int fillval_corr(cuFloatComplex *d_out, float val, int npix_in, int npix_out, int N, int device)
{
struct hipDeviceProp_t deviceProperties;
hipGetDeviceProperties(&deviceProperties, device);
int maxThreads = deviceProperties.maxThreadsPerBlock;
int nBlocks = deviceProperties.multiProcessorCount*8;
int nThreads = (N + nBlocks -1)/nBlocks;
if (nThreads > maxThreads) {
nThreads = maxThreads;
nBlocks = (N + nThreads -1)/nThreads;
}
dim3 grid(nBlocks), threads(nThreads);
hipLaunchKernelGGL(( fillval_krnl), dim3(grid), dim3(threads), 0, 0, d_out,val,npix_in,npix_out,N);
cutilCheckMsg("fillcorr_kernel<<<>>> execution failed\n");
return EXIT_SUCCESS;
}
__global__ void corr_krnl(cuFloatComplex *odata,cuFloatComplex *idata, int N)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
cuFloatComplex tmp;
while (tid < N) {
tmp.x = idata[tid].x*odata[tid].x+idata[tid].y*odata[tid].y;
tmp.y = -1.0f*idata[tid].y*odata[tid].x+idata[tid].x*odata[tid].y;
odata[tid] = tmp;
tid += blockDim.x * gridDim.x;
}
}
int correl(cuFloatComplex *d_odata,cuFloatComplex *d_idata,int N,int device)
{
struct hipDeviceProp_t deviceProperties;
hipGetDeviceProperties(&deviceProperties, device);
int maxThreads = deviceProperties.maxThreadsPerBlock;
int nBlocks = deviceProperties.multiProcessorCount*8;
int nThreads = (N + nBlocks -1)/nBlocks;
if (nThreads > maxThreads) {
nThreads = maxThreads;
nBlocks = (N + nThreads -1)/nThreads;
}
dim3 grid(nBlocks), threads(nThreads);
hipLaunchKernelGGL(( corr_krnl), dim3(grid), dim3(threads), 0, 0, d_odata, d_idata, N);
return EXIT_SUCCESS;
}
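/*
   Illustrative note (not in the original source): conv_krnl above multiplies the
   two Fourier-domain buffers directly (a convolution), while corr_krnl multiplies
   odata by the complex conjugate of idata (a cross-correlation). The unused
   device helper below, whose name is an assumption, spells out that identity.
*/
__device__ static inline cuFloatComplex sketch_conj_mul(cuFloatComplex a, cuFloatComplex b)
{
  // conj(a) * b = (a.x - i*a.y) * (b.x + i*b.y)
  cuFloatComplex r;
  r.x = a.x * b.x + a.y * b.y;
  r.y = a.x * b.y - a.y * b.x;
  return r;
}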
__global__ void corrnorm_krnl(float *odata,float *idata, int N)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < N) {
odata[tid] /= (idata[tid]+1.e-6);
tid += blockDim.x * gridDim.x;
}
}
int corr_norm(float *d_odata,float *d_idata,int N,int device)
{
struct hipDeviceProp_t deviceProperties;
hipGetDeviceProperties(&deviceProperties, device);
int maxThreads = deviceProperties.maxThreadsPerBlock;
int nBlocks = deviceProperties.multiProcessorCount*8;
int nThreads = (N + nBlocks -1)/nBlocks;
if (nThreads > maxThreads) {
nThreads = maxThreads;
nBlocks = (N + nThreads -1)/nThreads;
}
dim3 grid(nBlocks), threads(nThreads);
hipLaunchKernelGGL(( corrnorm_krnl), dim3(grid), dim3(threads), 0, 0, d_odata, d_idata, N);
return EXIT_SUCCESS;
}
/*
__global__ void fillcamplipup_krnl(cuFloatComplex *amplipup, float *phase,float *offset, float *mask, int *indx, int Nfft,
int Npup, int npup, int N)
{
int nim,idim,idimx,idimy,idx;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < N) {
nim = tid / Npup;
idim = tid - nim * Npup;
idimx = idim % npup;
idimy = idim / npup;
idx = idimx + idimy * Nfft + nim * Nfft * Nfft;
amplipup[idx].x = (cosf(phase[indx[tid]]-offset[idim]))*mask[indx[tid]];
amplipup[idx].y = (sinf(phase[indx[tid]]-offset[idim]))*mask[indx[tid]];
tid += blockDim.x * gridDim.x;
}
}
int fillcamplipup(cuFloatComplex *amplipup, float *phase, float *offset, float *mask, int *indx, int Nfft, int Npup, int Nsub,
int npup, int device)
// here amplipup is a cube of data of size nfft x nfft x nsubap
// phase is an array of size pupdiam x pupdiam
// offset is an array of size pdiam x pdiam
// mask is an array of size pupdiam x pupdiam
// indx is an array of size pdiam x pdiam x nsubap
// number of thread required : pdiam x pdiam x nsubap
// Npup = pdiam x pdiam
{
struct hipDeviceProp_t deviceProperties;
hipGetDeviceProperties(&deviceProperties, device);
int N = Npup * Nsub;
int maxThreads = deviceProperties.maxThreadsPerBlock;
int nBlocks = deviceProperties.multiProcessorCount*8;
int nThreads = (N + nBlocks -1)/nBlocks;
if (nThreads > maxThreads) {
nThreads = maxThreads;
nBlocks = (N + nThreads -1)/nThreads;
}
dim3 grid(nBlocks), threads(nThreads);
fillcamplipup_krnl<<<grid, threads>>>(amplipup,phase,offset,mask,indx,Nfft,Npup,npup,N);
cutilCheckMsg("fillcamplipup_kernel<<<>>> execution failed\n");
return EXIT_SUCCESS;
}
*/
| d0ab1a386c7e54ae5b936374cbf06df282d11f10.cu | #include <yoga_wfs.h>
#include <yoga_ao_utils.h>
// Utility class used to avoid linker errors with extern
// unsized shared memory arrays with templated type
template<class T> struct SharedMemory
{
__device__ inline operator T*()
{
extern __shared__ int __smem[];
return (T*)__smem;
}
__device__ inline operator const T*() const
{
extern __shared__ int __smem[];
return (T*)__smem;
}
};
// specialize for double to avoid unaligned memory
// access compile errors
template<> struct SharedMemory<double>
{
__device__ inline operator double*()
{
extern __shared__ double __smem_d[];
return (double*)__smem_d;
}
__device__ inline operator const double*() const
{
extern __shared__ double __smem_d[];
return (double*)__smem_d;
}
};
bool isPow2(unsigned int x)
{
return ((x&(x-1))==0);
}
__global__ void camplipup_krnl(cuFloatComplex *amplipup, float *phase,float *offset, float *mask, int *istart,
int *jstart, int *ivalid, int *jvalid, int nphase, int nphase2, int npup, int Nfft, int N)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < N) {
int nim = tid / nphase2;
int idim = tid - nim * nphase2;
int idimx = idim % nphase; // nphase : size of the phase support in subaps
int idimy = idim / nphase;
int idphase = idimx + idimy * npup + istart[ivalid[nim]] + jstart[jvalid[nim]] * npup;
// npup : size of the input phase screen
int idx = idimx + idimy * Nfft + nim * Nfft * Nfft;
amplipup[idx].x = (cosf(phase[idphase]-offset[idim]))*mask[idphase];
amplipup[idx].y = (sinf(phase[idphase]-offset[idim]))*mask[idphase];
tid += blockDim.x * gridDim.x;
}
}
int fillcamplipup2(cuFloatComplex *amplipup, float *phase, float *offset, float *mask, int *istart, int *jstart,
int *ivalid, int *jvalid, int nphase, int npup, int Nfft, int Ntot, int device)
// here amplipup is a cube of data of size nfft x nfft x nsubap
// phase is an array of size pupdiam x pupdiam
// offset is an array of size pdiam x pdiam
// mask is an array of size pupdiam x pupdiam
// number of thread required : pdiam x pdiam x nsubap
{
struct cudaDeviceProp deviceProperties;
cudaGetDeviceProperties(&deviceProperties, device);
int maxThreads = deviceProperties.maxThreadsPerBlock;
int nBlocks = deviceProperties.multiProcessorCount*8;
int nThreads = (Ntot + nBlocks -1)/nBlocks;
if (nThreads > maxThreads) {
nThreads = maxThreads;
nBlocks = (Ntot + nThreads -1)/nThreads;
}
dim3 grid(nBlocks), threads(nThreads);
int nphase2 = nphase * nphase;
camplipup_krnl<<<grid, threads>>>(amplipup,phase,offset,mask,istart,jstart,ivalid,jvalid,nphase,nphase2,npup,Nfft,Ntot);
cutilCheckMsg("fillcamplipup_kernel<<<>>> execution failed\n");
return EXIT_SUCCESS;
}
__global__ void bimg_krnl(float *bimage, float *bcube, int npix, int npix2, int nsub, int *ivalid, int *jvalid, float alpha, int N)
{
/*
bcube holds the npix x npix image of each subaperture; this kernel writes it
into the full binned image bimage at the subaperture position given by
ivalid/jvalid (alpha = 1 accumulates, alpha = 0 overwrites)
npix2 = npix x npix
*/
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < N) {
int nim = tid / npix2;
int tidim = tid - nim * npix2;
int xim = tidim % npix;
int yim = tidim / npix;
int idbin = xim + yim * nsub + ivalid[nim] * npix + jvalid[nim] * npix * nsub;
bimage[idbin] = alpha * bimage[idbin] + bcube[tid];
tid += blockDim.x * gridDim.x;
}
}
int fillbinimg(float *bimage, float *bcube, int npix, int nsub, int Nsub, int *ivalid, int *jvalid, bool add, int device)
{
struct cudaDeviceProp deviceProperties;
cudaGetDeviceProperties(&deviceProperties, device);
int Npix = npix * npix;
int N = Npix * nsub;
int maxThreads = deviceProperties.maxThreadsPerBlock;
int nBlocks = deviceProperties.multiProcessorCount*8;
int nThreads = (N + nBlocks -1)/nBlocks;
if (nThreads > maxThreads) {
nThreads = maxThreads;
nBlocks = (N + nThreads -1)/nThreads;
}
dim3 grid(nBlocks), threads(nThreads);
// dim3 grid(128), threads(128);
float alpha;
if (add) alpha = 1.0f;
else alpha = 0.0f;
bimg_krnl<<<grid, threads>>>(bimage,bcube,npix,Npix,Nsub,ivalid,jvalid,alpha,N);
cutilCheckMsg("binimg_kernel<<<>>> execution failed\n");
return EXIT_SUCCESS;
}
__global__ void fillbincube_krnl(float *bcube, float *hrimage, int *indxpix, int Nfft, int Npix, int Nrebin, int N)
{
/*
indx is an array nrebin^2 * npix^2
it gives the nrebin x nrebin pixels in the hrimage per npix x npix pixels of the subap
Npix = npix x npix
*/
int npix,nsubap,nrebin;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < N) {
nsubap = tid / Npix;
npix = tid % Npix;
for (int i=0;i<Nrebin;i++) {
nrebin = indxpix[i+npix*Nrebin];
bcube[tid] += hrimage[nrebin + Nfft*nsubap];
}
tid += blockDim.x * gridDim.x;
}
}
int fillbincube(float *bcube, float *hrimage, int *indxpix, int Nfft, int Npix, int Nrebin, int Nsub, int device)
{
struct cudaDeviceProp deviceProperties;
cudaGetDeviceProperties(&deviceProperties, device);
int N = Npix * Nsub;
int maxThreads = deviceProperties.maxThreadsPerBlock;
int nBlocks = deviceProperties.multiProcessorCount*8;
int nThreads = (N + nBlocks -1)/nBlocks;
if (nThreads > maxThreads) {
nThreads = maxThreads;
nBlocks = (N + nThreads -1)/nThreads;
}
dim3 grid(nBlocks), threads(nThreads);
// dim3 grid(128), threads(128);
fillbincube_krnl<<<grid, threads>>>(bcube,hrimage,indxpix,Nfft,Npix,Nrebin,N);
cutilCheckMsg("fillbincube_kernel<<<>>> execution failed\n");
return EXIT_SUCCESS;
}
template<class T> __global__ void indexfill_krnl(T *odata, T *idata, int *indx,int ntot, int Ntot, int N)
{
int nim, idim;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < N) {
nim = tid / ntot;
idim = tid - (nim * ntot);
odata[indx[idim] + (nim * Ntot)] = idata[tid];
tid += blockDim.x * gridDim.x;
}
}
int indexfill(float *d_odata,float *d_idata,int *indx,int nx, int Nx, int N,int device)
{
int ntot = nx * nx;
int Ntot = Nx * Nx;
struct cudaDeviceProp deviceProperties;
cudaGetDeviceProperties(&deviceProperties, device);
int maxThreads = deviceProperties.maxThreadsPerBlock;
int nBlocks = deviceProperties.multiProcessorCount*8;
int nThreads = (N + nBlocks -1)/nBlocks;
if (nThreads > maxThreads) {
nThreads = maxThreads;
nBlocks = (N + nThreads -1)/nThreads;
}
dim3 grid(nBlocks), threads(nThreads);
indexfill_krnl<<<grid, threads>>>(d_odata, d_idata,indx, ntot, Ntot, N);
return EXIT_SUCCESS;
}
__global__ void conv_krnl(cuFloatComplex *odata,cuFloatComplex *idata, int N)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
cuFloatComplex tmp;
while (tid < N) {
tmp.x = idata[tid].x*odata[tid].x-idata[tid].y*odata[tid].y;
tmp.y = idata[tid].y*odata[tid].x+idata[tid].x*odata[tid].y;
odata[tid] = tmp;
tid += blockDim.x * gridDim.x;
}
}
int convolve(cuFloatComplex *d_odata,cuFloatComplex *d_idata,int N,int device)
{
struct cudaDeviceProp deviceProperties;
cudaGetDeviceProperties(&deviceProperties, device);
int maxThreads = deviceProperties.maxThreadsPerBlock;
int nBlocks = deviceProperties.multiProcessorCount*8;
int nThreads = (N + nBlocks -1)/nBlocks;
if (nThreads > maxThreads) {
nThreads = maxThreads;
nBlocks = (N + nThreads -1)/nThreads;
}
dim3 grid(nBlocks), threads(nThreads);
// dim3 grid(128), threads(128);
conv_krnl<<<grid, threads>>>(d_odata, d_idata, N);
return EXIT_SUCCESS;
}
/*
This version uses sequential addressing -- no divergence or bank conflicts.
*/
template <class T> __device__ void reduce_krnl(T *sdata, int size, int n)
{
if (!((size&(size-1))==0)) {
unsigned int s;
if (size %2 != 0) s = size/2+1;
else s = size/2;
unsigned int s_old = size;
while (s>0) {
if ((n < s) && (n + s < s_old)) {
sdata[n] += sdata[n + s];
}
__syncthreads();
s_old = s;
s /= 2;
if ((2*s < s_old) && (s!=0)) s += 1;
}
} else {
// do reduction in shared mem
for(unsigned int s=size/2; s>0; s>>=1) {
if (n < s) {
sdata[n] += sdata[n + s];
}
__syncthreads();
}
}
}
template <class T> __device__ void scanmax_krnl(T *sdata, int *values,int size, int n)
{
if (!((size&(size-1))==0)) {
unsigned int s;
if (size %2 != 0) s = size/2+1;
else s = size/2;
unsigned int s_old = size;
while (s>0) {
if ((n < s) && (n + s < s_old)) {
if (sdata[n] < sdata[n + s]) {
values[n] = n + s;
sdata[n] = sdata[n+s];
}
}
__syncthreads();
s_old = s;
s /= 2;
if ((2*s < s_old) && (s!=0)) s += 1;
}
} else {
// do reduction in shared mem
for(unsigned int s=size/2; s>0; s>>=1) {
if (n < s) {
if (sdata[n] < sdata[n + s]) {
values[n] = n + s;
sdata[n] = sdata[n+s];
}
}
__syncthreads();
}
}
}
template <class T>
__device__ inline void mswap(T & a, T & b)
{
T tmp = a;
a = b;
b = tmp;
}
template <class T> __device__ inline void sortmax_krnl(T *sdata, unsigned int *values,int size, int n)
{
if (!((size&(size-1))==0)) {
unsigned int s;
if (size %2 != 0) s = size/2+1;
else s = size/2;
unsigned int s_old = size;
while (s>0) {
if ((n < s) && (n + s < s_old)) {
if (sdata[n] < sdata[n + s]) {
mswap(values[n],values[n+s]);
mswap(sdata[n],sdata[n+s]);
}
}
__syncthreads();
s_old = s;
s /= 2;
if ((2*s < s_old) && (s!=0)) s += 1;
}
} else {
// do reduction in shared mem
for(unsigned int s=size/2; s>0; s>>=1) {
if (n < s) {
if (sdata[n] < sdata[n + s]) {
mswap(values[n],values[n+s]);
mswap(sdata[n],sdata[n+s]);
}
}
__syncthreads();
}
}
}
template <class T> __global__ void reduce2(T *g_idata, T *g_odata, unsigned int n)
{
T *sdata = SharedMemory<T>();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < n) ? g_idata[i] : 0;
__syncthreads();
reduce_krnl(sdata,blockDim.x,tid);
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
template <class T> __global__ void reduce2(T *g_idata, T *g_odata, T thresh, unsigned int n)
{
T *sdata = SharedMemory<T>();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) sdata[tid] = (g_idata[i] > thresh) ? g_idata[i] : 0;
__syncthreads();
reduce_krnl(sdata,blockDim.x,tid);
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
template <class T> __global__ void sortmax(T *g_idata, T *g_odata, int *values, int nmax)
{
extern __shared__ uint svalues[];
T *sdata = (T*)&svalues[blockDim.x];
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
svalues[tid] = tid;
sdata[tid] = g_idata[i];
__syncthreads();
for (int cc=0;cc<nmax;cc++) {
if (tid >= cc) sortmax_krnl(&(sdata[cc]),&(svalues[cc]),blockDim.x-cc,tid-cc);
__syncthreads();
}
if (tid < nmax) {
g_odata[nmax*blockIdx.x+tid] = sdata[tid];
values[nmax*blockIdx.x+tid] = svalues[tid];
}
__syncthreads();
}
template <class T> __global__ void centroid_max(T *g_idata, T *g_odata, int n, int nmax, int nsub)
{
extern __shared__ uint svalues[];
T *sdata = (T*)&svalues[blockDim.x];
T subsum;
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
svalues[tid] = tid;
sdata[tid] = g_idata[i];
__syncthreads();
for (int cc=0;cc<nmax;cc++) {
if (tid >= cc) sortmax_krnl(&(sdata[cc]),&(svalues[cc]),blockDim.x-cc,tid-cc);
__syncthreads();
}
// at this point the nmax first elements of sdata are the nmax brightest
// pixels and the nmax first elements of svalues are their positions
// first copy the brightest values out for reduction
if ((tid >= nmax) && (tid < 2*nmax)) sdata[tid] = sdata[tid-nmax];
__syncthreads();
reduce_krnl(sdata,nmax,tid);
// get the sum per subap
if (tid == 0) subsum = sdata[tid];
// put back the brightest pixels values
if ((tid >= nmax) && (tid < 2*nmax)) sdata[tid-nmax] = sdata[tid];
__syncthreads();
// compute the centroid on the first part of the array
if (tid < nmax) sdata[tid] *= ((svalues[tid] % n) + 1);
// x centroid
__syncthreads();
reduce_krnl(sdata,nmax,tid);
if (tid == 0) g_odata[blockIdx.x] = sdata[tid]/subsum;
// put back the brightest pixels values
if ((tid >= nmax) && (tid < 2*nmax)) sdata[tid-nmax] = sdata[tid];
__syncthreads();
// compute the centroid on the first part of the array
if (tid < nmax) sdata[tid] *= (svalues[tid] / n + 1);
// y centroid
__syncthreads();
reduce_krnl(sdata,nmax,tid);
if (tid == 0) g_odata[blockIdx.x+nsub] = sdata[tid]/subsum;
}
template <class T> void subap_reduce(int size, int threads, int blocks, T *d_idata, T *d_odata)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize = (threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T);
reduce2<T><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
cutilCheckMsg("reduce_kernel<<<>>> execution failed\n");
}
template void subap_reduce<float>(int size, int threads, int blocks, float *d_idata, float *d_odata);
template void subap_reduce<double>(int size, int threads, int blocks, double *d_idata, double *d_odata);
template <class T> void subap_reduce(int size, int threads, int blocks, T *d_idata, T *d_odata, T thresh)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize = (threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T);
reduce2<T><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, thresh, size);
cutilCheckMsg("reduce_kernel<<<>>> execution failed\n");
}
template void subap_reduce<float>(int size, int threads, int blocks, float *d_idata, float *d_odata, float thresh);
template void subap_reduce<double>(int size, int threads, int blocks, double *d_idata, double *d_odata, double thresh);
template <class T> void subap_sortmax(int size, int threads, int blocks, T *d_idata, T *d_odata, int *values, int nmax)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize = (threads <= 32) ? 2 * threads * (sizeof(T) + sizeof(uint)) : threads * (sizeof(T) + sizeof(uint));
sortmax<T><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, values, nmax);
cutilCheckMsg("sortmax_kernel<<<>>> execution failed\n");
}
template void subap_sortmax<float>(int size, int threads, int blocks, float *d_idata, float *d_odata, int *values, int nmax);
template void subap_sortmax<double>(int size, int threads, int blocks, double *d_idata, double *d_odata, int *values, int nmax);
template <class T> void subap_centromax(int threads, int blocks, T *d_idata, T *d_odata, int npix, int nmax)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize = (threads <= 32) ? 2 * threads * (sizeof(T) + sizeof(uint)) : threads * (sizeof(T) + sizeof(uint));
centroid_max<T><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata,npix, nmax,blocks);
cutilCheckMsg("sortmax_kernel<<<>>> execution failed\n");
}
template void subap_centromax<float>( int threads, int blocks, float *d_idata, float *d_odata, int npix, int nmax);
template void subap_centromax<double>(int threads, int blocks, double *d_idata, double *d_odata, int npix, int nmax);
template <class T> __global__ void reduce_phasex(T *g_idata, T *g_odata, int *indx, unsigned int n, T alpha)
{
T *sdata = SharedMemory<T>();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * n * n;
sdata[tid] = g_idata[indx[i+tid*n+n-1]]-g_idata[indx[i+tid*n]];
__syncthreads();
reduce_krnl(sdata,blockDim.x,tid);
// write result for this block to global mem
if (tid == 0) {
g_odata[blockIdx.x] = sdata[0] / n * alpha;
}
}
template <class T> __global__ void reduce_phasey(T *g_idata, T *g_odata, int *indx, unsigned int n, T alpha)
// FIXME
// full parallelization would require using 4x as many threads
// instead of doing 4 operations per thread
{
T *sdata = SharedMemory<T>();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * n * n + threadIdx.x;
sdata[tid] = g_idata[indx[i+(n-1)*n]]-g_idata[indx[i]];
__syncthreads();
reduce_krnl(sdata,blockDim.x,tid);
// write result for this block to global mem
if (tid == 0) {
g_odata[blockIdx.x] = sdata[0]/n*alpha;
}
}
template <class T> void phase_reduce(int threads, int blocks, T *d_idata, T *d_odata, int *indx, T alpha)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize = (threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T);
reduce_phasex<T><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, indx, threads, alpha);
cutilCheckMsg("reduce_phasex_kernel<<<>>> execution failed\n");
reduce_phasey<T><<< dimGrid, dimBlock, smemSize >>>(d_idata, &(d_odata[blocks]), indx, threads, alpha);
cutilCheckMsg("reduce_phasey_kernel<<<>>> execution failed\n");
}
template void phase_reduce<float>(int threads, int blocks, float *d_idata, float *d_odata, int *indx, float alpha);
template void phase_reduce<double>(int threads, int blocks, double *d_idata, double *d_odata, int *indx, double alpha);
template <class T> __global__ void derive_phasex(T *g_idata, T *g_odata, int *indx, T *mask, T alpha, unsigned int n, unsigned int N, float *fluxPerSub)
{
T *sdata = SharedMemory<T>();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < N) {
if (tid % n == 0) {//start of a column
sdata[tid] = (g_idata[indx[i+1]] - g_idata[indx[i]]) * mask[indx[i]];
} else {
if ((tid+1) % n == 0) {//end of a column
sdata[tid] = (g_idata[indx[i]] - g_idata[indx[i-1]]) * mask[indx[i]];
} else
sdata[tid] = (g_idata[indx[i+1]] - g_idata[indx[i-1]]) * mask[indx[i]];
}
}
__syncthreads();
reduce_krnl(sdata,blockDim.x,tid);
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0] / n / 2 * alpha / fluxPerSub[blockIdx.x];
}
template <class T> __global__ void derive_phasey(T *g_idata, T *g_odata, int *indx, T *mask, T alpha, unsigned int n, unsigned int N, float *fluxPerSub)
{
T *sdata = SharedMemory<T>();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < N) {
if (tid < n) {//start of a column
sdata[tid] = (g_idata[indx[i+n]] - g_idata[indx[i]]) * mask[indx[i]];
} else {
if (tid >= n*(n-1)) {//end of a column
sdata[tid] = (g_idata[indx[i]] - g_idata[indx[i-n]]) * mask[indx[i]];
} else
sdata[tid] = (g_idata[indx[i+n]] - g_idata[indx[i-n]]) * mask[indx[i]];
}
}
__syncthreads();
reduce_krnl(sdata,blockDim.x,tid);
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0] / n / 2 * alpha / fluxPerSub[blockIdx.x];
}
template <class T> void phase_derive(int size, int threads, int blocks, int n, T *d_idata, T *d_odata, int *indx, T *mask, T alpha, float *fluxPerSub)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize = (threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T);
derive_phasex<T><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, indx, mask, alpha, n, size,fluxPerSub);
cutilCheckMsg("phase_derivex_kernel<<<>>> execution failed\n");
derive_phasey<T><<< dimGrid, dimBlock, smemSize >>>(d_idata, &(d_odata[blocks]), indx, mask, alpha, n, size,fluxPerSub);
cutilCheckMsg("phase_derivey_kernel<<<>>> execution failed\n");
}
template void phase_derive<float>(int size, int threads, int blocks, int n, float *d_idata, float *d_odata, int *indx, float *mask,float alpha, float *fluxPerSub);
template void phase_derive<double>(int size, int threads, int blocks, int n, double *d_idata, double *d_odata, int *indx, double *mask,double alpha, float *fluxPerSub);
template <class T> __global__ void centroidx(T *g_idata, T *g_odata, T *alpha, unsigned int n, unsigned int N)
{
T *sdata = SharedMemory<T>();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < N) ? g_idata[i] * ((tid % n) +1) : 0;
__syncthreads();
reduce_krnl(sdata,blockDim.x,tid);
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0]*1.0/(alpha[blockIdx.x]+1.e-6);
}
template <class T> __global__ void centroidy(T *g_idata, T *g_odata, T *alpha, unsigned int n, unsigned int N)
{
T *sdata = SharedMemory<T>();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < N) ? g_idata[i] * ((tid / n) +1) : 0;
__syncthreads();
reduce_krnl(sdata,blockDim.x,tid);
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0]*1.0/(alpha[blockIdx.x]+1.e-6);
}
template <class T> void get_centroids(int size, int threads, int blocks, int n, T *d_idata, T *d_odata, T *alpha)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize = (threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T);
centroidx<T><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, alpha, n, size);
cutilCheckMsg("centroidx_kernel<<<>>> execution failed\n");
centroidy<T><<< dimGrid, dimBlock, smemSize >>>(d_idata, &(d_odata[blocks]), alpha, n, size);
cutilCheckMsg("centroidy_kernel<<<>>> execution failed\n");
}
template void get_centroids<float>(int size, int threads, int blocks, int n,float *d_idata, float *d_odata,float *alpha);
template void get_centroids<double>(int size, int threads, int blocks, int n,double *d_idata, double *d_odata, double *alpha);
template <class T> __global__ void centroidx(T *g_idata, T *g_odata, T *alpha, T thresh, unsigned int n, unsigned int N)
{
T *sdata = SharedMemory<T>();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < N) sdata[tid] = (g_idata[i] > thresh) ? g_idata[i] * ((tid % n) +1) : 0;
__syncthreads();
reduce_krnl(sdata,blockDim.x,tid);
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0]*1.0/(alpha[blockIdx.x]+1.e-6);
}
template <class T> __global__ void centroidy(T *g_idata, T *g_odata, T *alpha, T thresh, unsigned int n, unsigned int N)
{
T *sdata = SharedMemory<T>();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < N) sdata[tid] = (g_idata[i] > thresh) ? g_idata[i] * ((tid / n) +1) : 0;
__syncthreads();
reduce_krnl(sdata,blockDim.x,tid);
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0]*1.0/(alpha[blockIdx.x]+1.e-6);
}
template <class T> void get_centroids(int size, int threads, int blocks, int n, T *d_idata, T *d_odata, T *alpha, T thresh)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize = (threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T);
centroidx<T><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, alpha, thresh, n, size);
cutilCheckMsg("centroidx_kernel<<<>>> execution failed\n");
centroidy<T><<< dimGrid, dimBlock, smemSize >>>(d_idata, &(d_odata[blocks]), alpha, thresh, n, size);
cutilCheckMsg("centroidy_kernel<<<>>> execution failed\n");
}
template void get_centroids<float>(int size, int threads, int blocks, int n,float *d_idata, float *d_odata,float *alpha, float thresh);
template void get_centroids<double>(int size, int threads, int blocks, int n,double *d_idata, double *d_odata, double *alpha, double thresh);
__global__ void fillcorr_krnl(cuFloatComplex *d_out, float *d_in,int npix_in,int npix_out,int N)
{
int nim,npix,idx;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < N) {
nim = tid / npix_in;
npix = tid % npix_in;
idx = nim * npix_out + npix;
d_out[idx].x = d_in[tid];
d_out[idx].y = 0.0;
tid += blockDim.x * gridDim.x;
}
}
int fill_corr(cuFloatComplex *d_out, float *d_in, int npix_in, int npix_out, int N, int device)
{
struct cudaDeviceProp deviceProperties;
cudaGetDeviceProperties(&deviceProperties, device);
int maxThreads = deviceProperties.maxThreadsPerBlock;
int nBlocks = deviceProperties.multiProcessorCount*8;
int nThreads = (N + nBlocks -1)/nBlocks;
if (nThreads > maxThreads) {
nThreads = maxThreads;
nBlocks = (N + nThreads -1)/nThreads;
}
dim3 grid(nBlocks), threads(nThreads);
fillcorr_krnl<<<grid, threads>>>(d_out,d_in,npix_in,npix_out,N);
cutilCheckMsg("fillcorr_kernel<<<>>> execution failed\n");
return EXIT_SUCCESS;
}
__global__ void fillval_krnl(cuFloatComplex *d_out, float val,int npix_in,int npix_out,int N)
{
int nim,npix,idx;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < N) {
nim = tid / npix_in;
npix = tid % npix_in;
idx = nim * npix_out + npix;
d_out[idx].x = val;
d_out[idx].y = 0.0;
tid += blockDim.x * gridDim.x;
}
}
int fillval_corr(cuFloatComplex *d_out, float val, int npix_in, int npix_out, int N, int device)
{
struct cudaDeviceProp deviceProperties;
cudaGetDeviceProperties(&deviceProperties, device);
int maxThreads = deviceProperties.maxThreadsPerBlock;
int nBlocks = deviceProperties.multiProcessorCount*8;
int nThreads = (N + nBlocks -1)/nBlocks;
if (nThreads > maxThreads) {
nThreads = maxThreads;
nBlocks = (N + nThreads -1)/nThreads;
}
dim3 grid(nBlocks), threads(nThreads);
fillval_krnl<<<grid, threads>>>(d_out,val,npix_in,npix_out,N);
cutilCheckMsg("fillcorr_kernel<<<>>> execution failed\n");
return EXIT_SUCCESS;
}
__global__ void corr_krnl(cuFloatComplex *odata,cuFloatComplex *idata, int N)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
cuFloatComplex tmp;
while (tid < N) {
tmp.x = idata[tid].x*odata[tid].x+idata[tid].y*odata[tid].y;
tmp.y = -1.0f*idata[tid].y*odata[tid].x+idata[tid].x*odata[tid].y;
odata[tid] = tmp;
tid += blockDim.x * gridDim.x;
}
}
int correl(cuFloatComplex *d_odata,cuFloatComplex *d_idata,int N,int device)
{
struct cudaDeviceProp deviceProperties;
cudaGetDeviceProperties(&deviceProperties, device);
int maxThreads = deviceProperties.maxThreadsPerBlock;
int nBlocks = deviceProperties.multiProcessorCount*8;
int nThreads = (N + nBlocks -1)/nBlocks;
if (nThreads > maxThreads) {
nThreads = maxThreads;
nBlocks = (N + nThreads -1)/nThreads;
}
dim3 grid(nBlocks), threads(nThreads);
corr_krnl<<<grid, threads>>>(d_odata, d_idata, N);
return EXIT_SUCCESS;
}
__global__ void corrnorm_krnl(float *odata,float *idata, int N)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < N) {
odata[tid] /= (idata[tid]+1.e-6);
tid += blockDim.x * gridDim.x;
}
}
int corr_norm(float *d_odata,float *d_idata,int N,int device)
{
struct cudaDeviceProp deviceProperties;
cudaGetDeviceProperties(&deviceProperties, device);
int maxThreads = deviceProperties.maxThreadsPerBlock;
int nBlocks = deviceProperties.multiProcessorCount*8;
int nThreads = (N + nBlocks -1)/nBlocks;
if (nThreads > maxThreads) {
nThreads = maxThreads;
nBlocks = (N + nThreads -1)/nThreads;
}
dim3 grid(nBlocks), threads(nThreads);
corrnorm_krnl<<<grid, threads>>>(d_odata, d_idata, N);
return EXIT_SUCCESS;
}
/*
__global__ void fillcamplipup_krnl(cuFloatComplex *amplipup, float *phase,float *offset, float *mask, int *indx, int Nfft,
int Npup, int npup, int N)
{
int nim,idim,idimx,idimy,idx;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < N) {
nim = tid / Npup;
idim = tid - nim * Npup;
idimx = idim % npup;
idimy = idim / npup;
idx = idimx + idimy * Nfft + nim * Nfft * Nfft;
amplipup[idx].x = (cosf(phase[indx[tid]]-offset[idim]))*mask[indx[tid]];
amplipup[idx].y = (sinf(phase[indx[tid]]-offset[idim]))*mask[indx[tid]];
tid += blockDim.x * gridDim.x;
}
}
int fillcamplipup(cuFloatComplex *amplipup, float *phase, float *offset, float *mask, int *indx, int Nfft, int Npup, int Nsub,
int npup, int device)
// here amplipup is a cube of data of size nfft x nfft x nsubap
// phase is an array of size pupdiam x pupdiam
// offset is an array of size pdiam x pdiam
// mask is an array of size pupdiam x pupdiam
// indx is an array of size pdiam x pdiam x nsubap
// number of thread required : pdiam x pdiam x nsubap
// Npup = pdiam x pdiam
{
struct cudaDeviceProp deviceProperties;
cudaGetDeviceProperties(&deviceProperties, device);
int N = Npup * Nsub;
int maxThreads = deviceProperties.maxThreadsPerBlock;
int nBlocks = deviceProperties.multiProcessorCount*8;
int nThreads = (N + nBlocks -1)/nBlocks;
if (nThreads > maxThreads) {
nThreads = maxThreads;
nBlocks = (N + nThreads -1)/nThreads;
}
dim3 grid(nBlocks), threads(nThreads);
fillcamplipup_krnl<<<grid, threads>>>(amplipup,phase,offset,mask,indx,Nfft,Npup,npup,N);
cutilCheckMsg("fillcamplipup_kernel<<<>>> execution failed\n");
return EXIT_SUCCESS;
}
*/
|
e960a66ddf375b13a4ae8c3b33f511a3466026f3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//#include <hip/driver_types.h>
#include "MT.h"
MT::MT(int _x, int _y, std::string fuel_model_name, std::string fuel_moisture_name)
: Propagation(_x, _y, fuel_model_name, fuel_moisture_name) {
toa_map_ = (int*) malloc(sim_size_ * sizeof(int));
timesteppers_ = (int*) malloc(2* sizeof(int));
// l_n_ = (float*) malloc(16 * sizeof(float));
}
MT::~MT(){
// Free Host Memory
free(toa_map_);
free(timesteppers_);
free(l_n_);
// Free Device Memory
hipFree(g_toa_map_);
hipFree(g_timesteppers_);
hipFree(g_l_n_);
}
bool MT::Init(std::string fuel_file, std::string terrain_file,
std::string canopy_height_file, std::string crown_base_height_file,
std::string crown_bulk_density_file, std::string wind_x, std::string wind_y,
std::string dynLiveH, std::string dynDead1, std::string csr, std::string msr,
int M_FLAG, int C_FLAG, int S_FLAG) {
// Call Parent Init Fcn
Propagation::Init(fuel_file, terrain_file,
canopy_height_file, crown_base_height_file,
crown_bulk_density_file, wind_x, wind_y, dynLiveH,
dynDead1, csr, msr, M_FLAG, C_FLAG, S_FLAG);
// Initialize TOA Map
for(unsigned int i = 0; i < sim_size_; i++){
toa_map_[i] = simulation_->ign_time_[i];
}
// Initialize TimeNow and TimeNext
timesteppers_[0] = timesteppers_[1] = 0;
// Populate lengths
// for(unsigned int i = 0; i < 16; i++){
// l_n_[i] = simulation_->l_n_[i];
// }
l_n_ = simulation_->l_n_;
return true;
}
bool MT::CopyToDevice(int M_FLAG, int C_FLAG, int S_FLAG) {
Propagation::CopyToDevice(M_FLAG, C_FLAG, S_FLAG);
// Create memory on device
hipError_t err = hipMalloc((void**) &g_toa_map_, sim_size_*sizeof(int));
err = hipMalloc( (void**) &g_wind_x_map_in_, sim_size_*sizeof(float));
err = hipMalloc( (void**) &g_wind_x_map_out_, sim_size_*sizeof(float));
err = hipMalloc( (void**) &g_wind_y_map_in_, sim_size_*sizeof(float));
err = hipMalloc( (void**) &g_wind_y_map_out_, sim_size_*sizeof(float));
err = hipMalloc((void**) &g_timesteppers_, 2*sizeof(int));
err = hipMalloc((void**) &g_l_n_, 16*sizeof(float));
if (err != hipSuccess) {
std::cerr << "Error Allocating Memory in MT Class: " << hipGetErrorString(err) << std::endl;
exit(1);
return false;
}
// Copy data to device
err = hipMemcpy(g_toa_map_, toa_map_, sim_size_*sizeof(int), hipMemcpyHostToDevice);
err = hipMemcpy(g_wind_x_map_in_, wind_x_map_, sim_size_*sizeof(float), hipMemcpyHostToDevice);
err = hipMemcpy(g_wind_x_map_out_, wind_x_map_, sim_size_*sizeof(float), hipMemcpyHostToDevice);
err = hipMemcpy(g_wind_y_map_in_, wind_y_map_, sim_size_*sizeof(float), hipMemcpyHostToDevice);
err = hipMemcpy(g_wind_y_map_out_, wind_y_map_, sim_size_*sizeof(float), hipMemcpyHostToDevice);
err = hipMemcpy(g_timesteppers_, timesteppers_, 2*sizeof(int), hipMemcpyHostToDevice);
err = hipMemcpy(g_l_n_, l_n_, 16* sizeof(float), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cerr << "Error Copying Memory in MT Class: " << hipGetErrorString(err) << std::endl;
exit(1);
return false;
}
return true;
}
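// RunKernel advances the simulation one tick per loop iteration: TestCrownRate_NoSpot
// runs only when crowning is enabled, then Accelerate updates the current spread rates,
// MinTime updates the time-of-arrival map, TimeKernelMT bumps the time steppers, and the
// device is synchronized before the next tick. The spotting_flag argument is currently
// unused in this path.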
bool MT::RunKernel(int start_tick, int B, int T, bool crowning_flag, bool spotting_flag, int stop_tick) {
// printf("Kicking off Kernels\n");
int counter = start_tick;
//int terminate = -1;
// int B = 64;
// int T = 64;
// while(terminate <= 0){
while(counter < stop_tick){
counter++;
// Do calculations
if(crowning_flag)
hipLaunchKernelGGL(( TestCrownRate_NoSpot), dim3(B),dim3(T), 0, 0, g_curspreadrate_, g_maxspreadrate_, g_intensity_modifier_, sim_size_, g_I_o_, g_RAC_);
// Accelerate Fire
hipLaunchKernelGGL(( Accelerate), dim3(B),dim3(T), 0, 0, g_curspreadrate_,g_maxspreadrate_, sim_size_*16, simulation_->time_step_);
// hipLaunchKernelGGL(( MinTime), dim3(B),dim3(T), 0, 0, g_toa_map_, g_maxspreadrate_,
hipLaunchKernelGGL(( MinTime), dim3(B),dim3(T), 0, 0, g_toa_map_, g_curspreadrate_,
g_timesteppers_, g_l_n_, sim_size_,
sim_rows_, sim_cols_);
// Update Time Kernel
hipLaunchKernelGGL(( TimeKernelMT), dim3(1),dim3(1), 0, 0, g_timesteppers_);
hipDeviceSynchronize();
// hipError_t err = hipMemcpyFromSymbol(&terminate, end, sizeof(end), 0,
// hipMemcpyDeviceToHost);
// if (err != hipSuccess) {
// std::cerr << "Error copying from GPU: " << hipGetErrorString(err) << std::endl;
// exit(1);
// return false;
// }
// printf("end: %d\n", terminate);
// if(terminate < 4)
// terminate = -1;
// }
// int finishCount = 0;
// // Catch last corner to terminate simulation
// while(finishCount <= 3){
// counter++;
// finishCount++;
// // Do calculations
// hipLaunchKernelGGL(( MinTime), dim3(B),dim3(T), 0, 0, g_toa_map_, g_maxspreadrate_,
// g_timesteppers_, g_l_n_, sim_size_,
// sim_rows_, sim_cols_);
// // Update Time Kernel
// timeKernelMT<<<1,1>>>(g_timesteppers_);
}
return true;
}
bool MT::CopyFromDevice() {
hipError_t err = hipMemcpy(toa_map_, g_toa_map_, sim_size_ * sizeof(int), hipMemcpyDeviceToHost);
if(err != hipSuccess){
std::cerr << "Error copying from GPU: " << hipGetErrorString(err) << std::endl;
exit(1);
return false;
}
printf("MT Copy WINDX From Device\n");
err = hipMemcpy(wind_x_map_, g_wind_x_map_in_,
sim_size_*sizeof(float),
hipMemcpyDeviceToHost);
if (err != hipSuccess) {
std::cerr << "Error copying from GPU: " << hipGetErrorString(err) << std::endl;
exit(1);
return false;
}
printf("MT Copy WINDY From Device\n");
err = hipMemcpy(wind_y_map_, g_wind_y_map_in_,
sim_size_*sizeof(float),
hipMemcpyDeviceToHost);
if (err != hipSuccess) {
std::cerr << "Error copying from GPU: " << hipGetErrorString(err) << std::endl;
exit(1);
return false;
}
return true;
}
bool MT::WriteToFile(std::string filename) {
std::ofstream fout;
fout.open(filename.c_str());
for(unsigned int i = 0; i < sim_size_; i++){
if(i % simulation_->sim_dim_x_ == 0 && i !=0){
fout << '\n';
}
fout << (int) toa_map_[i] << ",";
}
fout.close();
return true;
}
bool MT::WindXToFile(std::string filename, std::string* metaptr) {
std::ofstream fout;
fout.open(filename.c_str());
// add metadata to the first eight lines
for(int x = 0; x < 8; x++)
{
fout << metaptr[x];
}
fout << '\n';
for(unsigned int i = 0; i < sim_size_; i++){
if(i % simulation_->sim_dim_x_ == 0 && i !=0){
fout << '\n';
}
fout << (float) wind_x_map_[i] << ",";
}
fout.close();
return true;
}
bool MT::WindYToFile(std::string filename, std::string* metaptr) {
std::ofstream fout;
fout.open(filename.c_str());
// add metadata to the first eight lines
for(int x = 0; x < 8; x++)
{
fout << metaptr[x];
}
fout << '\n';
for(unsigned int i = 0; i < sim_size_; i++){
if(i % simulation_->sim_dim_x_ == 0 && i !=0){
fout << '\n';
}
fout << (float) wind_y_map_[i] << ",";
}
fout.close();
return true;
}
bool MT::UpdateCell(int _x, int _y, int val){
if(_x < 0 || _y < 0 || _x > sim_rows_ || _y > sim_cols_)
return false;
int cell = _x * sim_cols_ + _y;
toa_map_[cell] = val;
return true;
}
| e960a66ddf375b13a4ae8c3b33f511a3466026f3.cu | //#include <driver_types.h>
#include "MT.h"
MT::MT(int _x, int _y, std::string fuel_model_name, std::string fuel_moisture_name)
: Propagation(_x, _y, fuel_model_name, fuel_moisture_name) {
toa_map_ = (int*) malloc(sim_size_ * sizeof(int));
timesteppers_ = (int*) malloc(2* sizeof(int));
// l_n_ = (float*) malloc(16 * sizeof(float));
}
MT::~MT(){
// Free Host Memory
free(toa_map_);
free(timesteppers_);
free(l_n_);
// Free Device Memory
cudaFree(g_toa_map_);
cudaFree(g_timesteppers_);
cudaFree(g_l_n_);
}
bool MT::Init(std::string fuel_file, std::string terrain_file,
std::string canopy_height_file, std::string crown_base_height_file,
std::string crown_bulk_density_file, std::string wind_x, std::string wind_y,
std::string dynLiveH, std::string dynDead1, std::string csr, std::string msr,
int M_FLAG, int C_FLAG, int S_FLAG) {
// Call Parent Init Fcn
Propagation::Init(fuel_file, terrain_file,
canopy_height_file, crown_base_height_file,
crown_bulk_density_file, wind_x, wind_y, dynLiveH,
dynDead1, csr, msr, M_FLAG, C_FLAG, S_FLAG);
// Initialize TOA Map
for(unsigned int i = 0; i < sim_size_; i++){
toa_map_[i] = simulation_->ign_time_[i];
}
// Initialize TimeNow and TimeNext
timesteppers_[0] = timesteppers_[1] = 0;
// Populate lengths
// for(unsigned int i = 0; i < 16; i++){
// l_n_[i] = simulation_->l_n_[i];
// }
l_n_ = simulation_->l_n_;
return true;
}
bool MT::CopyToDevice(int M_FLAG, int C_FLAG, int S_FLAG) {
Propagation::CopyToDevice(M_FLAG, C_FLAG, S_FLAG);
// Create memory on device
cudaError_t err = cudaMalloc((void**) &g_toa_map_, sim_size_*sizeof(int));
err = cudaMalloc( (void**) &g_wind_x_map_in_, sim_size_*sizeof(float));
err = cudaMalloc( (void**) &g_wind_x_map_out_, sim_size_*sizeof(float));
err = cudaMalloc( (void**) &g_wind_y_map_in_, sim_size_*sizeof(float));
err = cudaMalloc( (void**) &g_wind_y_map_out_, sim_size_*sizeof(float));
err = cudaMalloc((void**) &g_timesteppers_, 2*sizeof(int));
err = cudaMalloc((void**) &g_l_n_, 16*sizeof(float));
if (err != cudaSuccess) {
std::cerr << "Error Allocating Memory in MT Class: " << cudaGetErrorString(err) << std::endl;
exit(1);
return false;
}
// Copy data to device
err = cudaMemcpy(g_toa_map_, toa_map_, sim_size_*sizeof(int), cudaMemcpyHostToDevice);
err = cudaMemcpy(g_wind_x_map_in_, wind_x_map_, sim_size_*sizeof(float), cudaMemcpyHostToDevice);
err = cudaMemcpy(g_wind_x_map_out_, wind_x_map_, sim_size_*sizeof(float), cudaMemcpyHostToDevice);
err = cudaMemcpy(g_wind_y_map_in_, wind_y_map_, sim_size_*sizeof(float), cudaMemcpyHostToDevice);
err = cudaMemcpy(g_wind_y_map_out_, wind_y_map_, sim_size_*sizeof(float), cudaMemcpyHostToDevice);
err = cudaMemcpy(g_timesteppers_, timesteppers_, 2*sizeof(int), cudaMemcpyHostToDevice);
err = cudaMemcpy(g_l_n_, l_n_, 16* sizeof(float), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cerr << "Error Copying Memory in MT Class: " << cudaGetErrorString(err) << std::endl;
exit(1);
return false;
}
return true;
}
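// RunKernel advances the simulation one tick per loop iteration: TestCrownRate_NoSpot
// runs only when crowning is enabled, then Accelerate updates the current spread rates,
// MinTime updates the time-of-arrival map, TimeKernelMT bumps the time steppers, and the
// device is synchronized before the next tick. The spotting_flag argument is currently
// unused in this path.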
bool MT::RunKernel(int start_tick, int B, int T, bool crowning_flag, bool spotting_flag, int stop_tick) {
// printf("Kicking off Kernels\n");
int counter = start_tick;
//int terminate = -1;
// int B = 64;
// int T = 64;
// while(terminate <= 0){
while(counter < stop_tick){
counter++;
// Do calculations
if(crowning_flag)
TestCrownRate_NoSpot<<<B,T>>>(g_curspreadrate_, g_maxspreadrate_, g_intensity_modifier_, sim_size_, g_I_o_, g_RAC_);
// Accelerate Fire
Accelerate<<<B,T>>>(g_curspreadrate_,g_maxspreadrate_, sim_size_*16, simulation_->time_step_);
// MinTime<<<B,T>>>(g_toa_map_, g_maxspreadrate_,
MinTime<<<B,T>>>(g_toa_map_, g_curspreadrate_,
g_timesteppers_, g_l_n_, sim_size_,
sim_rows_, sim_cols_);
// Update Time Kernel
TimeKernelMT<<<1,1>>>(g_timesteppers_);
cudaDeviceSynchronize();
// cudaError_t err = cudaMemcpyFromSymbol(&terminate, end, sizeof(end), 0,
// cudaMemcpyDeviceToHost);
// if (err != cudaSuccess) {
// std::cerr << "Error copying from GPU: " << cudaGetErrorString(err) << std::endl;
// exit(1);
// return false;
// }
// printf("end: %d\n", terminate);
// if(terminate < 4)
// terminate = -1;
// }
// int finishCount = 0;
// // Catch last corner to terminate simulation
// while(finishCount <= 3){
// counter++;
// finishCount++;
// // Do calculations
// MinTime<<<B,T>>>(g_toa_map_, g_maxspreadrate_,
// g_timesteppers_, g_l_n_, sim_size_,
// sim_rows_, sim_cols_);
// // Update Time Kernel
// timeKernelMT<<<1,1>>>(g_timesteppers_);
}
return true;
}
bool MT::CopyFromDevice() {
cudaError_t err = cudaMemcpy(toa_map_, g_toa_map_, sim_size_ * sizeof(int), cudaMemcpyDeviceToHost);
if(err != cudaSuccess){
std::cerr << "Error copying from GPU: " << cudaGetErrorString(err) << std::endl;
exit(1);
return false;
}
printf("MT Copy WINDX From Device\n");
err = cudaMemcpy(wind_x_map_, g_wind_x_map_in_,
sim_size_*sizeof(float),
cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
std::cerr << "Error copying from GPU: " << cudaGetErrorString(err) << std::endl;
exit(1);
return false;
}
printf("MT Copy WINDY From Device\n");
err = cudaMemcpy(wind_y_map_, g_wind_y_map_in_,
sim_size_*sizeof(float),
cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
std::cerr << "Error copying from GPU: " << cudaGetErrorString(err) << std::endl;
exit(1);
return false;
}
return true;
}
bool MT::WriteToFile(std::string filename) {
std::ofstream fout;
fout.open(filename.c_str());
for(unsigned int i = 0; i < sim_size_; i++){
if(i % simulation_->sim_dim_x_ == 0 && i !=0){
fout << '\n';
}
fout << (int) toa_map_[i] << ",";
}
fout.close();
return true;
}
bool MT::WindXToFile(std::string filename, std::string* metaptr) {
std::ofstream fout;
fout.open(filename.c_str());
// add metadata to the first eight lines
for(int x = 0; x < 8; x++)
{
fout << metaptr[x];
}
fout << '\n';
for(unsigned int i = 0; i < sim_size_; i++){
if(i % simulation_->sim_dim_x_ == 0 && i !=0){
fout << '\n';
}
fout << (float) wind_x_map_[i] << ",";
}
fout.close();
return true;
}
bool MT::WindYToFile(std::string filename, std::string* metaptr) {
std::ofstream fout;
fout.open(filename.c_str());
// add metadata to the first eight lines
for(int x = 0; x < 8; x++)
{
fout << metaptr[x];
}
fout << '\n';
for(unsigned int i = 0; i < sim_size_; i++){
if(i % simulation_->sim_dim_x_ == 0 && i !=0){
fout << '\n';
}
fout << (float) wind_y_map_[i] << ",";
}
fout.close();
return true;
}
bool MT::UpdateCell(int _x, int _y, int val){
if(_x < 0 || _y < 0 || _x > sim_rows_ || _y > sim_cols_)
return false;
int cell = _x * sim_cols_ + _y;
toa_map_[cell] = val;
return true;
}
|
6ab82eca3b453d8c276b18f42c7fbde500a4c467.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "heat2d.h"
void Manage_Memory(int phase, int tid, float **h_u, float **h_ul, float **d_u, float **d_un){
hipError_t Error;
size_t global= ( NX+2)*( NY+2)*sizeof(float);
size_t local = (SNX+2)*(SNY+2)*sizeof(float);
if (phase==0) {
// Allocate domain on host
*h_u = (float*)malloc(global);
}
if (phase==1) {
// Allocate local domain variable on device
*h_ul = (float*)malloc(local);
Error = hipSetDevice(tid); if (DEBUG) printf("CUDA error (hipSetDevice) in thread %d = %s\n",tid,hipGetErrorString(Error));
Error = hipMalloc((void**)d_u ,local); if (DEBUG) printf("CUDA error (hipMalloc d_u ) in thread %d = %s\n",tid,hipGetErrorString(Error));
Error = hipMalloc((void**)d_un,local); if (DEBUG) printf("CUDA error (hipMalloc d_un) in thread %d = %s\n",tid,hipGetErrorString(Error));
}
if (phase==2) {
// Free local domain variable on device
Error = hipFree(*d_u ); if (DEBUG) printf("CUDA error (hipFree d_u ) in thread %d = %s\n",tid,hipGetErrorString(Error));
Error = hipFree(*d_un); if (DEBUG) printf("CUDA error (hipFree d_un) in thread %d = %s\n",tid,hipGetErrorString(Error));
free(*h_ul);
}
if (phase==3) {
// Free the domain on host
free(*h_u);
}
}
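// Manage_Comms moves data between the global (NX+2)x(NY+2) host grid and this thread's
// local (SNX+2)x(SNY+2) device block, selected by `phase`:
// 0: upload the whole local block (host -> device)
// 1: download the outermost interior rows/columns of the local block into the global grid
// 2: upload the ghost (halo) rows/columns from the global grid into the local block
// 3: download all interior rows of the local block into the global grid
// 4: download the whole local block (device -> host)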
void Manage_Comms(int phase, int tid, float **h_u, float **h_ul, float **d_u){
hipError_t Error;
if (phase==0) {
// Transfer all data from local domains to global domain
if (DEBUG) printf("::: Perform GPU-CPU comms (phase %d, thread %) :::\n",phase,tid);
Error=hipMemcpy(*d_u,*h_ul,(SNX+2)*(SNY+2)*sizeof(float),hipMemcpyHostToDevice); if (DEBUG) printf("CUDA error (Memcpy h -> d) = %s \n",hipGetErrorString(Error));
}
if (phase==1) {
// Copy left, right, up and down "interior" boundary cells from local domain to global domain
if (DEBUG) printf("::: Perform GPU-CPU comms (phase %d, thread %) :::\n",phase,tid);
for (int j = 0; j < SNY; j++) {
Error=hipMemcpy(*h_u+ 1 +tid*SNX+(NX+2)*(j+1),*d_u+ 1 +(SNX+2)*(j+1),sizeof(float),hipMemcpyDeviceToHost); if (DEBUG) printf("CUDA error (Memcpy d -> h) = %s \n",hipGetErrorString(Error));
Error=hipMemcpy(*h_u+SNX+tid*SNX+(NX+2)*(j+1),*d_u+SNX+(SNX+2)*(j+1),sizeof(float),hipMemcpyDeviceToHost); if (DEBUG) printf("CUDA error (Memcpy d -> h) = %s \n",hipGetErrorString(Error));
}
Error=hipMemcpy(*h_u+1+tid*SNX+(NX+2)* 1 ,*d_u+1+(SNX+2)* 1 ,SNX*sizeof(float),hipMemcpyDeviceToHost); if (DEBUG) printf("CUDA error (Memcpy d -> h) = %s \n",hipGetErrorString(Error));
Error=hipMemcpy(*h_u+1+tid*SNX+(NX+2)*SNY,*d_u+1+(SNX+2)*SNY,SNX*sizeof(float),hipMemcpyDeviceToHost); if (DEBUG) printf("CUDA error (Memcpy d -> h) = %s \n",hipGetErrorString(Error));
}
if (phase==2) {
// Copy left, right, up and down boundary cells from global domain to local domain
if (DEBUG) printf("::: Perform GPU-CPU comms (phase %d, thread %) :::\n",phase,tid);
for (int j = 0; j < SNY; j++) {
Error=hipMemcpy(*d_u+ 0 +(SNX+2)*(j+1),*h_u+ 0 +tid*SNX+(NX+2)*(j+1),sizeof(float),hipMemcpyHostToDevice); if (DEBUG) printf("CUDA error (Memcpy h -> d) = %s \n",hipGetErrorString(Error));
Error=hipMemcpy(*d_u+SNX+1+(SNX+2)*(j+1),*h_u+SNX+1+tid*SNX+(NX+2)*(j+1),sizeof(float),hipMemcpyHostToDevice); if (DEBUG) printf("CUDA error (Memcpy h -> d) = %s \n",hipGetErrorString(Error));
}
Error=hipMemcpy(*d_u+1+(SNX+2)* 0 ,*h_u+1+tid*SNX+(NX+2)* 0 ,SNX*sizeof(float),hipMemcpyHostToDevice); if (DEBUG) printf("CUDA error (Memcpy h -> d) = %s \n",hipGetErrorString(Error));
Error=hipMemcpy(*d_u+1+(SNX+2)*(SNY+1),*h_u+1+tid*SNX+(NX+2)*(SNY+1),SNX*sizeof(float),hipMemcpyHostToDevice); if (DEBUG) printf("CUDA error (Memcpy h -> d) = %s \n",hipGetErrorString(Error));
}
if (phase==3) {
// Transfer all data from local domains to global domain
if (DEBUG) printf("::: Perform GPU-CPU comms (phase %d, thread %) :::\n",phase,tid);
for (int j = 0; j < SNY; j++) {
Error=hipMemcpy(*h_u+1+tid*SNX+(NX+2)*(j+1),*d_u+1+(SNX+2)*(j+1),SNX*sizeof(float),hipMemcpyDeviceToHost); if (DEBUG) printf("CUDA error (Memcpy d -> h) = %s \n",hipGetErrorString(Error));
}
}
if (phase==4) {
// Transfer all data from local domains to global domain
if (DEBUG) printf("::: Perform GPU-CPU comms (phase %d, thread %) :::\n",phase,tid);
Error=hipMemcpy(*h_ul,*d_u,(SNX+2)*(SNY+2)*sizeof(float),hipMemcpyDeviceToHost); if (DEBUG) printf("CUDA error (Memcpy d -> h) = %s \n",hipGetErrorString(Error));
}
}
void Set_IC(float *u0){
// Set Dirichlet boundary conditions in global domain
for (int i = 0; i < NX+2; i++) u0[ i +(NX+2)* 0 ]=0.0; // down
for (int j = 0; j < NY+2; j++) u0[ 0 +(NX+2)* j ]=0.0; // left
for (int i = 0; i < NX+2; i++) u0[ i +(NX+2)*(NY+1)]=1.0; // up
for (int j = 0; j < NY+2; j++) u0[(NX+1)+(NX+2)* j ]=1.0; // right
}
void Call_CPU_Init(float **u0){
// Load the initial condition
Set_IC(*u0);
}
__global__ void Set_GPU_IC(int tid, float *u){
// Build local threads indexes
int i = threadIdx.x + blockIdx.x*blockDim.x;
int j = threadIdx.y + blockIdx.y*blockDim.y;
int o = i+(SNX+2)*j; u[o] = 0.0;
// Set initial condition only at "interior" nodes
//if (o<(SNX+2)*(SNY+2)) {
//if (i>0 && i<SNX+1 && j>0 && j<SNY+1) {
//switch (tid) {
//case 0: u[o] = 0.10; break;
//case 1: u[o] = 0.25; break;
//case 2: u[o] = 0.40; break;
//case 3: u[o] = 0.50; break;
//case 4: u[o] = 0.75; break;
//case 5: u[o] = 0.90; break;
//}
//}
//}
}
void Call_GPU_Init(int tid, float **ut0){
// Load the initial condition
dim3 dimBlock(NO_threads,NO_threads); // threads per block
dim3 dimGrid(ceil((SNX+2.0f)/NO_threads),ceil((SNY+2.0f)/NO_threads)); // blocks in grid
hipLaunchKernelGGL(( Set_GPU_IC), dim3(dimGrid),dim3(dimBlock), 0, 0, tid,*ut0);
if (DEBUG) printf("CUDA error (Set_GPU_IC) in thread %d = %s\n",tid,hipGetErrorString(hipPeekAtLastError()));
hipError_t Error = hipDeviceSynchronize();
if (DEBUG) printf("CUDA error (Set_GPU_IC Synchronize) %s\n",hipGetErrorString(Error));
}
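// Laplace1d performs one explicit (Jacobi-style) update of the 2D heat equation on the
// local block: interior nodes become u + KX*(u_e - 2u + u_w) + KY*(u_n - 2u + u_s),
// while boundary/ghost nodes are copied through unchanged.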
__global__ void Laplace1d(float *u, float *un){
// local threads indexes
int i = threadIdx.x + blockIdx.x*blockDim.x;
int j = threadIdx.y + blockIdx.y*blockDim.y;
int o = (i + (SNX+2)*j); // node( j,i ) n
int n = i+(SNX+2)*(j+1); // node(j+1,i) |
int s = i+(SNX+2)*(j-1); // node(j-1,i) w--o--e
int e = (i+1)+(SNX+2)*j; // node(j,i+1) |
int w = (i-1)+(SNX+2)*j; // node(j,i-1) s
// only update "interior" nodes
if(i>0 & i<SNX+1 & j>0 & j<SNY+1) {
un[o] = u[o] + KX*(u[e]-2*u[o]+u[w]) + KY*(u[n]-2*u[o]+u[s]);
} else {
un[o] = u[o];
}
}
void Call_Laplace(int tid, float **u, float **un){
// Produce one iteration of the laplace operator
dim3 dimBlock(NO_threads,NO_threads); // threads per block
dim3 dimGrid(ceil((SNX+2.0f)/NO_threads),ceil((SNY+2.0f)/NO_threads)); // blocks in grid
hipLaunchKernelGGL(( Laplace1d), dim3(dimGrid),dim3(dimBlock), 0, 0, *u,*un);
if (DEBUG) printf("CUDA error (Call_Laplace) in thread %d = %s\n",tid,hipGetErrorString(hipPeekAtLastError()));
}
void Save_Results(float *u){
// print result to txt file
FILE *pFile = fopen("result.txt", "w");
if (pFile != NULL) {
for (int j = 0; j < NY+2; j++) {
for (int i = 0; i < NX+2; i++) {
fprintf(pFile, "%d\t %d\t %g\n",i,j,u[i+(NX+2)*j]);
}
}
fclose(pFile);
} else {
printf("Unable to save to file\n");
}
}
void Save_Results_Tid(int tid, float *u){
// print result to txt file
if (tid==0) {
FILE *pFile = fopen("result0.txt", "w");
if (pFile != NULL) {
for (int j = 0; j < SNY+2; j++) {
for (int i = 0; i < SNX+2; i++) {
fprintf(pFile, "%d\t %d\t %g\n",i,j,u[i+(SNX+2)*j]);
}
}
fclose(pFile);
} else {
printf("Unable to save to file\n");
}
}
if (tid==1) {
FILE *pFile = fopen("result1.txt", "w");
if (pFile != NULL) {
for (int j = 0; j < SNY+2; j++) {
for (int i = 0; i < SNX+2; i++) {
fprintf(pFile, "%d\t %d\t %g\n",i,j,u[i+(SNX+2)*j]);
}
}
fclose(pFile);
} else {
printf("Unable to save to file\n");
}
}
}
| 6ab82eca3b453d8c276b18f42c7fbde500a4c467.cu |
#include "heat2d.h"
void Manage_Memory(int phase, int tid, float **h_u, float **h_ul, float **d_u, float **d_un){
cudaError_t Error;
size_t global= ( NX+2)*( NY+2)*sizeof(float);
size_t local = (SNX+2)*(SNY+2)*sizeof(float);
if (phase==0) {
// Allocate domain on host
*h_u = (float*)malloc(global);
}
if (phase==1) {
// Allocate local domain variable on device
*h_ul = (float*)malloc(local);
Error = cudaSetDevice(tid); if (DEBUG) printf("CUDA error (cudaSetDevice) in thread %d = %s\n",tid,cudaGetErrorString(Error));
Error = cudaMalloc((void**)d_u ,local); if (DEBUG) printf("CUDA error (cudaMalloc d_u ) in thread %d = %s\n",tid,cudaGetErrorString(Error));
Error = cudaMalloc((void**)d_un,local); if (DEBUG) printf("CUDA error (cudaMalloc d_un) in thread %d = %s\n",tid,cudaGetErrorString(Error));
}
if (phase==2) {
// Free local domain variable on device
Error = cudaFree(*d_u ); if (DEBUG) printf("CUDA error (cudaFree d_u ) in thread %d = %s\n",tid,cudaGetErrorString(Error));
Error = cudaFree(*d_un); if (DEBUG) printf("CUDA error (cudaFree d_un) in thread %d = %s\n",tid,cudaGetErrorString(Error));
free(*h_ul);
}
if (phase==3) {
// Free the domain on host
free(*h_u);
}
}
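// Manage_Comms moves data between the global (NX+2)x(NY+2) host grid and this thread's
// local (SNX+2)x(SNY+2) device block, selected by `phase`:
// 0: upload the whole local block (host -> device)
// 1: download the outermost interior rows/columns of the local block into the global grid
// 2: upload the ghost (halo) rows/columns from the global grid into the local block
// 3: download all interior rows of the local block into the global grid
// 4: download the whole local block (device -> host)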
void Manage_Comms(int phase, int tid, float **h_u, float **h_ul, float **d_u){
cudaError_t Error;
if (phase==0) {
// Transfer all data from local domains to global domain
if (DEBUG) printf("::: Perform GPU-CPU comms (phase %d, thread %) :::\n",phase,tid);
Error=cudaMemcpy(*d_u,*h_ul,(SNX+2)*(SNY+2)*sizeof(float),cudaMemcpyHostToDevice); if (DEBUG) printf("CUDA error (Memcpy h -> d) = %s \n",cudaGetErrorString(Error));
}
if (phase==1) {
// Copy left, right, up and down "interior" boundary cells from local domain to global domain
if (DEBUG) printf("::: Perform GPU-CPU comms (phase %d, thread %) :::\n",phase,tid);
for (int j = 0; j < SNY; j++) {
Error=cudaMemcpy(*h_u+ 1 +tid*SNX+(NX+2)*(j+1),*d_u+ 1 +(SNX+2)*(j+1),sizeof(float),cudaMemcpyDeviceToHost); if (DEBUG) printf("CUDA error (Memcpy d -> h) = %s \n",cudaGetErrorString(Error));
Error=cudaMemcpy(*h_u+SNX+tid*SNX+(NX+2)*(j+1),*d_u+SNX+(SNX+2)*(j+1),sizeof(float),cudaMemcpyDeviceToHost); if (DEBUG) printf("CUDA error (Memcpy d -> h) = %s \n",cudaGetErrorString(Error));
}
Error=cudaMemcpy(*h_u+1+tid*SNX+(NX+2)* 1 ,*d_u+1+(SNX+2)* 1 ,SNX*sizeof(float),cudaMemcpyDeviceToHost); if (DEBUG) printf("CUDA error (Memcpy d -> h) = %s \n",cudaGetErrorString(Error));
Error=cudaMemcpy(*h_u+1+tid*SNX+(NX+2)*SNY,*d_u+1+(SNX+2)*SNY,SNX*sizeof(float),cudaMemcpyDeviceToHost); if (DEBUG) printf("CUDA error (Memcpy d -> h) = %s \n",cudaGetErrorString(Error));
}
if (phase==2) {
// Copy left, right, up and down boundary cells from global domain to local domain
if (DEBUG) printf("::: Perform GPU-CPU comms (phase %d, thread %) :::\n",phase,tid);
for (int j = 0; j < SNY; j++) {
Error=cudaMemcpy(*d_u+ 0 +(SNX+2)*(j+1),*h_u+ 0 +tid*SNX+(NX+2)*(j+1),sizeof(float),cudaMemcpyHostToDevice); if (DEBUG) printf("CUDA error (Memcpy h -> d) = %s \n",cudaGetErrorString(Error));
Error=cudaMemcpy(*d_u+SNX+1+(SNX+2)*(j+1),*h_u+SNX+1+tid*SNX+(NX+2)*(j+1),sizeof(float),cudaMemcpyHostToDevice); if (DEBUG) printf("CUDA error (Memcpy h -> d) = %s \n",cudaGetErrorString(Error));
}
Error=cudaMemcpy(*d_u+1+(SNX+2)* 0 ,*h_u+1+tid*SNX+(NX+2)* 0 ,SNX*sizeof(float),cudaMemcpyHostToDevice); if (DEBUG) printf("CUDA error (Memcpy h -> d) = %s \n",cudaGetErrorString(Error));
Error=cudaMemcpy(*d_u+1+(SNX+2)*(SNY+1),*h_u+1+tid*SNX+(NX+2)*(SNY+1),SNX*sizeof(float),cudaMemcpyHostToDevice); if (DEBUG) printf("CUDA error (Memcpy h -> d) = %s \n",cudaGetErrorString(Error));
}
if (phase==3) {
// Transfer all data from local domains to global domain
if (DEBUG) printf("::: Perform GPU-CPU comms (phase %d, thread %) :::\n",phase,tid);
for (int j = 0; j < SNY; j++) {
Error=cudaMemcpy(*h_u+1+tid*SNX+(NX+2)*(j+1),*d_u+1+(SNX+2)*(j+1),SNX*sizeof(float),cudaMemcpyDeviceToHost); if (DEBUG) printf("CUDA error (Memcpy d -> h) = %s \n",cudaGetErrorString(Error));
}
}
if (phase==4) {
// Transfer all data from local domains to global domain
if (DEBUG) printf("::: Perform GPU-CPU comms (phase %d, thread %) :::\n",phase,tid);
Error=cudaMemcpy(*h_ul,*d_u,(SNX+2)*(SNY+2)*sizeof(float),cudaMemcpyDeviceToHost); if (DEBUG) printf("CUDA error (Memcpy d -> h) = %s \n",cudaGetErrorString(Error));
}
}
void Set_IC(float *u0){
// Set Dirichlet boundary conditions in global domain
for (int i = 0; i < NX+2; i++) u0[ i +(NX+2)* 0 ]=0.0; // down
for (int j = 0; j < NY+2; j++) u0[ 0 +(NX+2)* j ]=0.0; // left
for (int i = 0; i < NX+2; i++) u0[ i +(NX+2)*(NY+1)]=1.0; // up
for (int j = 0; j < NY+2; j++) u0[(NX+1)+(NX+2)* j ]=1.0; // right
}
void Call_CPU_Init(float **u0){
// Load the initial condition
Set_IC(*u0);
}
__global__ void Set_GPU_IC(int tid, float *u){
// Build local threads indexes
int i = threadIdx.x + blockIdx.x*blockDim.x;
int j = threadIdx.y + blockIdx.y*blockDim.y;
int o = i+(SNX+2)*j; u[o] = 0.0;
// Set initial condition only at "interior" nodes
//if (o<(SNX+2)*(SNY+2)) {
//if (i>0 && i<SNX+1 && j>0 && j<SNY+1) {
//switch (tid) {
//case 0: u[o] = 0.10; break;
//case 1: u[o] = 0.25; break;
//case 2: u[o] = 0.40; break;
//case 3: u[o] = 0.50; break;
//case 4: u[o] = 0.75; break;
//case 5: u[o] = 0.90; break;
//}
//}
//}
}
void Call_GPU_Init(int tid, float **ut0){
// Load the initial condition
dim3 dimBlock(NO_threads,NO_threads); // threads per block
dim3 dimGrid(ceil((SNX+2.0f)/NO_threads),ceil((SNY+2.0f)/NO_threads)); // blocks in grid
Set_GPU_IC<<<dimGrid,dimBlock>>>(tid,*ut0);
if (DEBUG) printf("CUDA error (Set_GPU_IC) in thread %d = %s\n",tid,cudaGetErrorString(cudaPeekAtLastError()));
cudaError_t Error = cudaDeviceSynchronize();
if (DEBUG) printf("CUDA error (Set_GPU_IC Synchronize) %s\n",cudaGetErrorString(Error));
}
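// Laplace1d performs one explicit (Jacobi-style) update of the 2D heat equation on the
// local block: interior nodes become u + KX*(u_e - 2u + u_w) + KY*(u_n - 2u + u_s),
// while boundary/ghost nodes are copied through unchanged.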
__global__ void Laplace1d(float *u, float *un){
// local threads indexes
int i = threadIdx.x + blockIdx.x*blockDim.x;
int j = threadIdx.y + blockIdx.y*blockDim.y;
int o = (i + (SNX+2)*j); // node( j,i ) n
int n = i+(SNX+2)*(j+1); // node(j+1,i) |
int s = i+(SNX+2)*(j-1); // node(j-1,i) w--o--e
int e = (i+1)+(SNX+2)*j; // node(j,i+1) |
int w = (i-1)+(SNX+2)*j; // node(j,i-1) s
// only update "interior" nodes
if(i>0 & i<SNX+1 & j>0 & j<SNY+1) {
un[o] = u[o] + KX*(u[e]-2*u[o]+u[w]) + KY*(u[n]-2*u[o]+u[s]);
} else {
un[o] = u[o];
}
}
void Call_Laplace(int tid, float **u, float **un){
// Produce one iteration of the laplace operator
dim3 dimBlock(NO_threads,NO_threads); // threads per block
dim3 dimGrid(ceil((SNX+2.0f)/NO_threads),ceil((SNY+2.0f)/NO_threads)); // blocks in grid
Laplace1d<<<dimGrid,dimBlock>>>(*u,*un);
if (DEBUG) printf("CUDA error (Call_Laplace) in thread %d = %s\n",tid,cudaGetErrorString(cudaPeekAtLastError()));
}
void Save_Results(float *u){
// print result to txt file
FILE *pFile = fopen("result.txt", "w");
if (pFile != NULL) {
for (int j = 0; j < NY+2; j++) {
for (int i = 0; i < NX+2; i++) {
fprintf(pFile, "%d\t %d\t %g\n",i,j,u[i+(NX+2)*j]);
}
}
fclose(pFile);
} else {
printf("Unable to save to file\n");
}
}
void Save_Results_Tid(int tid, float *u){
// print result to txt file
if (tid==0) {
FILE *pFile = fopen("result0.txt", "w");
if (pFile != NULL) {
for (int j = 0; j < SNY+2; j++) {
for (int i = 0; i < SNX+2; i++) {
fprintf(pFile, "%d\t %d\t %g\n",i,j,u[i+(SNX+2)*j]);
}
}
fclose(pFile);
} else {
printf("Unable to save to file\n");
}
}
if (tid==1) {
FILE *pFile = fopen("result1.txt", "w");
if (pFile != NULL) {
for (int j = 0; j < SNY+2; j++) {
for (int i = 0; i < SNX+2; i++) {
fprintf(pFile, "%d\t %d\t %g\n",i,j,u[i+(SNX+2)*j]);
}
}
fclose(pFile);
} else {
printf("Unable to save to file\n");
}
}
}
|
fdb8b09cbf1a2283ffd2bf09a20609a3383bfb50.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/fastertransformer/utils/cuda_type_utils.cuh"
#include "xlnet_preprocess_kernels.h"
namespace fastertransformer {
/*************Device Function**************/
template<typename T>
int numPerThread()
{
return sizeof(float) / sizeof(T);
}
/********************** Kernels ************************/
// Applied to half or bfloat16
// dim3 grid(batch_size, seq_len);
// getWordEmdK<<<grid, hidden_dim/2,0, stream>>>(word_emb_k, params_word_emb_k, inp_k, seq_len, hidden_dim);
template<typename T>
void __global__ getWordEmdK(T* word_emb_k, T* params_word_emb_k, int* inp_k, int seq_len, int hidden_dim)
{
using T2 = typename TypeConverter<T>::Type; // half2 or bfloat162
int col = threadIdx.x; // the index of column
int row = blockIdx.y; // the index of row
int batch = blockIdx.x; // the index of batch
int index = ldg(inp_k + batch * seq_len + row);
T2 data = ((T2*)params_word_emb_k)[(index * hidden_dim + col * 2) >> 1];
((T2*)word_emb_k)[(batch * seq_len * hidden_dim + row * hidden_dim + col * 2) >> 1] = data;
}
template<>
void __global__ getWordEmdK(float* word_emb_k, float* params_word_emb_k, int* inp_k, int seq_len, int hidden_dim)
{
int col = threadIdx.x; // the index of column
int row = blockIdx.y; // the index of row
int batch = blockIdx.x; // the index of batch
int index = inp_k[batch * seq_len + row];
float data = params_word_emb_k[index * hidden_dim + col];
word_emb_k[batch * seq_len * hidden_dim + row * hidden_dim + col] = data;
}
// Applied to half or bfloat16
template<typename T>
void __global__ getAttnMask(T* attn_mask, float* input_mask, int seq_len)
{
using T2 = typename TypeConverter<T>::Type; // half2 or bfloat162
int in_index = blockIdx.y * blockDim.x + threadIdx.x;
int col = in_index % (seq_len / 2) * 2;
int row = in_index / (seq_len / 2);
int batch = blockIdx.x;
float2 tmp;
if (row < seq_len && col < seq_len - 1) {
float data = 1;
if (col == row) {
data = 0;
}
tmp.x = input_mask[batch * seq_len + col] * data;
col += 1;
data = 1;
if (col == row) {
data = 0;
}
tmp.y = input_mask[batch * seq_len + col] * data;
int out_index = (batch * seq_len * seq_len + row * seq_len + col) >> 1;
((T2*)attn_mask)[out_index] = cuda_cast<T2>(tmp);
}
}
template<>
void __global__ getAttnMask(float* attn_mask, float* input_mask, int seq_len)
{
int col = threadIdx.x;
int row = blockIdx.y;
int batch = blockIdx.x;
float data = 1;
if (col == row) {
data = 0;
}
float mask = input_mask[batch * seq_len + col];
attn_mask[batch * seq_len * seq_len + row * seq_len + col] = cuda_cast<float>(data * mask);
}
// Applied to half or bfloat16
template<typename T>
void __global__ getSegMat(T* seg_mat, int* seg_id, int seq_len)
{
using T2 = typename TypeConverter<T>::Type; // half2 or bfloat162
int col = threadIdx.x;
int row = blockIdx.y;
int batch = blockIdx.x;
int w[4] = {0, 1, 1, 0};
int d1 = seg_id[batch * seq_len + col];
int d2 = seg_id[batch * seq_len + row];
int d = 0;
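    // floor(exp(-|d1 - d2|)) is a branch-free equality test: 1 when the two segment ids
    // match, 0 otherwise; w[2*d] and w[2*d+1] then select (1,0) for the same segment and
    // (0,1) for different segments.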
d = int(floor(exp(-1 * abs(double(d1 - d2)))));
int index = batch * seq_len * seq_len + row * seq_len + col;
float2 tmp_w;
tmp_w.x = w[d * 2 + 0];
tmp_w.y = w[d * 2 + 1];
((T2*)seg_mat)[index] = cuda_cast<T2>(tmp_w);
}
template<>
void __global__ getSegMat(float* seg_mat, int* seg_id, int seq_len)
{
int col = threadIdx.x;
int row = blockIdx.y;
int batch = blockIdx.x;
int w[4] = {0, 1, 1, 0};
int d1 = seg_id[batch * seq_len + col];
int d2 = seg_id[batch * seq_len + row];
int d = 0;
d = int(floor(exp(-1 * abs(double(d1 - d2)))));
int index = batch * seq_len * seq_len + row * seq_len + col;
seg_mat[index * 2] = w[d * 2 + 0];
seg_mat[index * 2 + 1] = w[d * 2 + 1];
}
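// relativePosition builds the Transformer-XL style sinusoidal relative position encoding:
// for position (seq_len - row) and frequency index col, inv_freq = 1 / 10000^(2*col/hidden_dim),
// the sine lands in the first hidden_dim/2 columns and the cosine in the second half.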
template<typename T>
void __global__ relativePosition(T* attr_k_head_r, int hidden_dim, int seq_len)
{
int row = blockIdx.x; //(0,256)
int col = threadIdx.x; //(0,384)
float freq_seq = col * 2;
float inv_freq = 1 / (pow(10000, freq_seq / (hidden_dim)));
float fwd_pos_seq = seq_len - row;
float pos_emd = inv_freq * fwd_pos_seq;
float s = sinf(pos_emd);
float c = cosf(pos_emd);
attr_k_head_r[row * hidden_dim + col] = cuda_cast<T>(s);
attr_k_head_r[row * hidden_dim + hidden_dim / 2 + col] = cuda_cast<T>(c);
}
/***********************Pre-Process************************/
// Applied to half or bfloat16
template<typename T>
void blockAttnMask(dim3& grid, dim3& block, int batch_size, int seq_len)
{
int numThreads = 512;
int numBlocky = (seq_len * seq_len / 2 - 1) / numThreads + 1;
grid.x = batch_size;
grid.y = numBlocky;
block.x = numThreads;
}
template<>
void blockAttnMask<float>(dim3& grid, dim3& block, int batch_size, int seq_len)
{
grid.x = batch_size;
grid.y = seq_len;
block.x = seq_len;
}
template<typename T>
void genWordEmdK(
int batch_size, int seq_len, int hidden_dim, T* word_emb_k, T* params_word_emb_k, int* inp_k, hipStream_t stream)
{
dim3 grid_word_emd_k(batch_size, seq_len);
dim3 block_word_emd_k(hidden_dim / numPerThread<T>());
hipLaunchKernelGGL(( getWordEmdK), dim3(grid_word_emd_k), dim3(block_word_emd_k), 0, stream,
word_emb_k, params_word_emb_k, inp_k, seq_len, hidden_dim);
}
template<typename T>
void preProcess(int batch_size,
int seq_len,
int hidden_dim,
T* attn_mask,
float* input_mask,
T* seg_mat,
int* seg_id,
T* attr_k_head_r,
hipStream_t stream)
{
dim3 grid_attn_mask;
dim3 block_attn_mask;
blockAttnMask<T>(grid_attn_mask, block_attn_mask, batch_size, seq_len);
hipLaunchKernelGGL(( getAttnMask), dim3(grid_attn_mask), dim3(block_attn_mask), 0, stream, attn_mask, input_mask, seq_len);
dim3 grid_seg_mat(batch_size, seq_len);
dim3 block_seg_mat(seq_len);
hipLaunchKernelGGL(( getSegMat), dim3(grid_seg_mat), dim3(block_seg_mat), 0, stream, seg_mat, seg_id, seq_len);
// relative_positional_encoding
dim3 grid_rel_position(seq_len * 2);
dim3 block_rel_position(hidden_dim / 2);
hipLaunchKernelGGL(( relativePosition), dim3(grid_rel_position), dim3(block_rel_position), 0, stream, attr_k_head_r, hidden_dim, seq_len);
}
template void preProcess<float>(int batch_size,
int seq_len,
int hidden_dim,
float* attn_mask,
float* input_mask,
float* seg_mat,
int* seg_id,
float* attr_k_head_r,
hipStream_t stream);
template void preProcess<half>(int batch_size,
int seq_len,
int hidden_dim,
half* attn_mask,
float* input_mask,
half* seg_mat,
int* seg_id,
half* attr_k_head_r,
hipStream_t stream);
#ifdef ENABLE_BF16
template void preProcess<__nv_bfloat16>(int batch_size,
int seq_len,
int hidden_dim,
__nv_bfloat16* attn_mask,
float* input_mask,
__nv_bfloat16* seg_mat,
int* seg_id,
__nv_bfloat16* attr_k_head_r,
hipStream_t stream);
#endif
template void genWordEmdK<float>(int batch_size,
int seq_len,
int hidden_dim,
float* word_emb_k,
float* params_word_emb_k,
int* inp_k,
hipStream_t stream);
template void genWordEmdK<half>(int batch_size,
int seq_len,
int hidden_dim,
half* word_emb_k,
half* params_word_emb_k,
int* inp_k,
hipStream_t stream);
#ifdef ENABLE_BF16
template void genWordEmdK<__nv_bfloat16>(int batch_size,
int seq_len,
int hidden_dim,
__nv_bfloat16* word_emb_k,
__nv_bfloat16* params_word_emb_k,
int* inp_k,
hipStream_t stream);
#endif
} // namespace fastertransformer
| fdb8b09cbf1a2283ffd2bf09a20609a3383bfb50.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/fastertransformer/utils/cuda_type_utils.cuh"
#include "xlnet_preprocess_kernels.h"
namespace fastertransformer {
/*************Device Function**************/
template<typename T>
int numPerThread()
{
return sizeof(float) / sizeof(T);
}
/********************** Kernels ************************/
// Applied to half or bfloat16
// dim3 grid(batch_size, seq_len);
// getWordEmdK<<<grid, hidden_dim/2,0, stream>>>(word_emb_k, params_word_emb_k, inp_k, seq_len, hidden_dim);
template<typename T>
void __global__ getWordEmdK(T* word_emb_k, T* params_word_emb_k, int* inp_k, int seq_len, int hidden_dim)
{
using T2 = typename TypeConverter<T>::Type; // half2 or bfloat162
int col = threadIdx.x; // the index of column
int row = blockIdx.y; // the index of row
int batch = blockIdx.x; // the index of batch
int index = ldg(inp_k + batch * seq_len + row);
T2 data = ((T2*)params_word_emb_k)[(index * hidden_dim + col * 2) >> 1];
((T2*)word_emb_k)[(batch * seq_len * hidden_dim + row * hidden_dim + col * 2) >> 1] = data;
}
template<>
void __global__ getWordEmdK(float* word_emb_k, float* params_word_emb_k, int* inp_k, int seq_len, int hidden_dim)
{
int col = threadIdx.x; // the index of column
int row = blockIdx.y; // the index of row
int batch = blockIdx.x; // the index of batch
int index = inp_k[batch * seq_len + row];
float data = params_word_emb_k[index * hidden_dim + col];
word_emb_k[batch * seq_len * hidden_dim + row * hidden_dim + col] = data;
}
// Applied to half or bfloat16
template<typename T>
void __global__ getAttnMask(T* attn_mask, float* input_mask, int seq_len)
{
using T2 = typename TypeConverter<T>::Type; // half2 or bfloat162
int in_index = blockIdx.y * blockDim.x + threadIdx.x;
int col = in_index % (seq_len / 2) * 2;
int row = in_index / (seq_len / 2);
int batch = blockIdx.x;
float2 tmp;
if (row < seq_len && col < seq_len - 1) {
float data = 1;
if (col == row) {
data = 0;
}
tmp.x = input_mask[batch * seq_len + col] * data;
col += 1;
data = 1;
if (col == row) {
data = 0;
}
tmp.y = input_mask[batch * seq_len + col] * data;
int out_index = (batch * seq_len * seq_len + row * seq_len + col) >> 1;
((T2*)attn_mask)[out_index] = cuda_cast<T2>(tmp);
}
}
template<>
void __global__ getAttnMask(float* attn_mask, float* input_mask, int seq_len)
{
int col = threadIdx.x;
int row = blockIdx.y;
int batch = blockIdx.x;
float data = 1;
if (col == row) {
data = 0;
}
float mask = input_mask[batch * seq_len + col];
attn_mask[batch * seq_len * seq_len + row * seq_len + col] = cuda_cast<float>(data * mask);
}
// Applied to half or bfloat16
template<typename T>
void __global__ getSegMat(T* seg_mat, int* seg_id, int seq_len)
{
using T2 = typename TypeConverter<T>::Type; // half2 or bfloat162
int col = threadIdx.x;
int row = blockIdx.y;
int batch = blockIdx.x;
int w[4] = {0, 1, 1, 0};
int d1 = seg_id[batch * seq_len + col];
int d2 = seg_id[batch * seq_len + row];
int d = 0;
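    // floor(exp(-|d1 - d2|)) is a branch-free equality test: 1 when the two segment ids
    // match, 0 otherwise; w[2*d] and w[2*d+1] then select (1,0) for the same segment and
    // (0,1) for different segments.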
d = int(floor(exp(-1 * abs(double(d1 - d2)))));
int index = batch * seq_len * seq_len + row * seq_len + col;
float2 tmp_w;
tmp_w.x = w[d * 2 + 0];
tmp_w.y = w[d * 2 + 1];
((T2*)seg_mat)[index] = cuda_cast<T2>(tmp_w);
}
template<>
void __global__ getSegMat(float* seg_mat, int* seg_id, int seq_len)
{
int col = threadIdx.x;
int row = blockIdx.y;
int batch = blockIdx.x;
int w[4] = {0, 1, 1, 0};
int d1 = seg_id[batch * seq_len + col];
int d2 = seg_id[batch * seq_len + row];
int d = 0;
d = int(floor(exp(-1 * abs(double(d1 - d2)))));
int index = batch * seq_len * seq_len + row * seq_len + col;
seg_mat[index * 2] = w[d * 2 + 0];
seg_mat[index * 2 + 1] = w[d * 2 + 1];
}
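// relativePosition builds the Transformer-XL style sinusoidal relative position encoding:
// for position (seq_len - row) and frequency index col, inv_freq = 1 / 10000^(2*col/hidden_dim),
// the sine lands in the first hidden_dim/2 columns and the cosine in the second half.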
template<typename T>
void __global__ relativePosition(T* attr_k_head_r, int hidden_dim, int seq_len)
{
int row = blockIdx.x; //(0,256)
int col = threadIdx.x; //(0,384)
float freq_seq = col * 2;
float inv_freq = 1 / (pow(10000, freq_seq / (hidden_dim)));
float fwd_pos_seq = seq_len - row;
float pos_emd = inv_freq * fwd_pos_seq;
float s = sinf(pos_emd);
float c = cosf(pos_emd);
attr_k_head_r[row * hidden_dim + col] = cuda_cast<T>(s);
attr_k_head_r[row * hidden_dim + hidden_dim / 2 + col] = cuda_cast<T>(c);
}
/***********************Pre-Process************************/
// Applied to half or bfloat16
template<typename T>
void blockAttnMask(dim3& grid, dim3& block, int batch_size, int seq_len)
{
int numThreads = 512;
int numBlocky = (seq_len * seq_len / 2 - 1) / numThreads + 1;
grid.x = batch_size;
grid.y = numBlocky;
block.x = numThreads;
}
template<>
void blockAttnMask<float>(dim3& grid, dim3& block, int batch_size, int seq_len)
{
grid.x = batch_size;
grid.y = seq_len;
block.x = seq_len;
}
template<typename T>
void genWordEmdK(
int batch_size, int seq_len, int hidden_dim, T* word_emb_k, T* params_word_emb_k, int* inp_k, cudaStream_t stream)
{
dim3 grid_word_emd_k(batch_size, seq_len);
dim3 block_word_emd_k(hidden_dim / numPerThread<T>());
getWordEmdK<<<grid_word_emd_k, block_word_emd_k, 0, stream>>>(
word_emb_k, params_word_emb_k, inp_k, seq_len, hidden_dim);
}
template<typename T>
void preProcess(int batch_size,
int seq_len,
int hidden_dim,
T* attn_mask,
float* input_mask,
T* seg_mat,
int* seg_id,
T* attr_k_head_r,
cudaStream_t stream)
{
dim3 grid_attn_mask;
dim3 block_attn_mask;
blockAttnMask<T>(grid_attn_mask, block_attn_mask, batch_size, seq_len);
getAttnMask<<<grid_attn_mask, block_attn_mask, 0, stream>>>(attn_mask, input_mask, seq_len);
dim3 grid_seg_mat(batch_size, seq_len);
dim3 block_seg_mat(seq_len);
getSegMat<<<grid_seg_mat, block_seg_mat, 0, stream>>>(seg_mat, seg_id, seq_len);
// relative_positional_encoding
dim3 grid_rel_position(seq_len * 2);
dim3 block_rel_position(hidden_dim / 2);
relativePosition<<<grid_rel_position, block_rel_position, 0, stream>>>(attr_k_head_r, hidden_dim, seq_len);
}
template void preProcess<float>(int batch_size,
int seq_len,
int hidden_dim,
float* attn_mask,
float* input_mask,
float* seg_mat,
int* seg_id,
float* attr_k_head_r,
cudaStream_t stream);
template void preProcess<half>(int batch_size,
int seq_len,
int hidden_dim,
half* attn_mask,
float* input_mask,
half* seg_mat,
int* seg_id,
half* attr_k_head_r,
cudaStream_t stream);
#ifdef ENABLE_BF16
template void preProcess<__nv_bfloat16>(int batch_size,
int seq_len,
int hidden_dim,
__nv_bfloat16* attn_mask,
float* input_mask,
__nv_bfloat16* seg_mat,
int* seg_id,
__nv_bfloat16* attr_k_head_r,
cudaStream_t stream);
#endif
template void genWordEmdK<float>(int batch_size,
int seq_len,
int hidden_dim,
float* word_emb_k,
float* params_word_emb_k,
int* inp_k,
cudaStream_t stream);
template void genWordEmdK<half>(int batch_size,
int seq_len,
int hidden_dim,
half* word_emb_k,
half* params_word_emb_k,
int* inp_k,
cudaStream_t stream);
#ifdef ENABLE_BF16
template void genWordEmdK<__nv_bfloat16>(int batch_size,
int seq_len,
int hidden_dim,
__nv_bfloat16* word_emb_k,
__nv_bfloat16* params_word_emb_k,
int* inp_k,
cudaStream_t stream);
#endif
} // namespace fastertransformer
|
738c8cc8bb573c61fdc59676e2d4ad2264fdcbb5.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Convolution 3D profiling
*/
#include <iostream>
#include <stdexcept>
#include <iomanip>
#include <ios>
#include "cutlass/core_io.h"
#include "conv3d_operation_profiler.h"
#include "gpu_timer.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
using namespace cutlass::library;
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Ctor
Conv3dOperationProfiler::Conv3dOperationProfiler(Options const &options):
OperationProfiler(
options,
library::OperationKind::kConv3d,
{
{ArgumentTypeID::kEnumerated, {"conv_kind"}, "Convolutional operator (fprop, dgrad, wgrad)"},
{ArgumentTypeID::kInteger, {"n", "input_n"}, "Input N dimension of the Conv3d problem space"},
{ArgumentTypeID::kInteger, {"d", "input_d"}, "Input D dimension of the Conv3d problem space"},
{ArgumentTypeID::kInteger, {"h", "input_h"}, "Input H dimension of the Conv3d problem space"},
{ArgumentTypeID::kInteger, {"w", "input_w"}, "Input W dimension of the Conv3d problem space"},
{ArgumentTypeID::kInteger, {"c", "input_c"}, "Input C dimension of the Conv3d problem space"},
{ArgumentTypeID::kInteger, {"k", "filter_k"}, "Filter K dimension of the Conv3d problem space"},
{ArgumentTypeID::kInteger, {"t", "filter_t"}, "Filter T dimension of the Conv3d problem space"},
{ArgumentTypeID::kInteger, {"r", "filter_r"}, "Filter R dimension of the Conv3d problem space"},
{ArgumentTypeID::kInteger, {"s", "filter_s"}, "Filter S dimension of the Conv3d problem space"},
{ArgumentTypeID::kInteger, {"z", "output_z"}, "Output Z dimension of the Conv3d problem space"},
{ArgumentTypeID::kInteger, {"p", "output_p"}, "Output P dimension of the Conv3d problem space"},
{ArgumentTypeID::kInteger, {"q", "output_q"}, "Output Q dimension of the Conv3d problem space"},
{ArgumentTypeID::kInteger, {"pad_d"}, "Padding in D direction"},
{ArgumentTypeID::kInteger, {"pad_h"}, "Padding in H direction"},
{ArgumentTypeID::kInteger, {"pad_w"}, "Padding in W direction"},
{ArgumentTypeID::kInteger, {"stride_d"}, "Stride in D direction"},
{ArgumentTypeID::kInteger, {"stride_h"}, "Stride in H direction"},
{ArgumentTypeID::kInteger, {"stride_w"}, "Stride in W direction"},
{ArgumentTypeID::kInteger, {"dilation_d"}, "Dilation in D direction"},
{ArgumentTypeID::kInteger, {"dilation_h"}, "Dilation in H direction"},
{ArgumentTypeID::kInteger, {"dilation_w"}, "Dilation in W direction"},
{ArgumentTypeID::kTensor, {"Activation"}, "Tensor storing the Activation operand"},
{ArgumentTypeID::kTensor, {"Filter"}, "Tensor storing the Filter operand"},
{ArgumentTypeID::kTensor, {"Output"}, "Tensor storing the Output operand"},
{ArgumentTypeID::kEnumerated, {"conv_mode"}, "Convolution filter mode (conv, cross)"},
{ArgumentTypeID::kEnumerated, {"iterator_algorithm", "iterator_algo"}, "Convolution iterator algorithm (analytic, optimized)"},
{ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"},
{ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"},
{ArgumentTypeID::kEnumerated, {"split_k_mode", "split-k-mode"}, "SplitK mode for serial or parallel reduction (serial, parallel)"},
{ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"},
{ArgumentTypeID::kEnumerated, {"eq_gemm_provider", "eq-gemm-provider"}, "Enable profiling equivalent gemm by the following providers (cutlass)"},
},
{ library::Provider::kReferenceDevice, library::Provider::kReferenceHost, library::Provider::kCUDNN }
) {
description_ = " Conv3d operation. Output(Tensor5D) = alpha * Input(Tensor5D) * Filter(Tensor5D) + beta * Input(Tensor5D)";
}
/// Destructor
Conv3dOperationProfiler::~Conv3dOperationProfiler() {
}
/// Prints usage statement for the math function
void Conv3dOperationProfiler::print_usage(std::ostream &out) const {
out << "Conv3d" << "\n\n";
OperationProfiler::print_usage(out);
}
/// Prints examples
void Conv3dOperationProfiler::print_examples(std::ostream &out) const {
out << "\nExamples:\n\n"
<< "Profile a particular convolution (specify all the convolution parameters):\n"
<< " $ cutlass_profiler --operation=Conv3d"
" --Activation=f16:ndhwc --Filter=f16:ndhwc --Output=f16 --accumulator-type=f32"
" --n=32 --d=16 --h=14 --w=14 --c=8 --k=64 --t=3 --r=3 --s=3"
" --pad_d=1 --pad_h=1 --pad_w=1"
" --stride_d=1 --stride::h=1 --stride::w=1"
" --dilation_d=1 --dilation::h=1 --dilation::w=1\n\n";
}
#if 0
// used this for debugging
static std::string byte_string(std::vector<uint8_t> const &bytes) {
std::stringstream ss;
ss << "0x";
for (size_t idx = bytes.size(); idx > 0; --idx) {
ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1));
}
return ss.str();
}
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
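// The traffic and flop models below use the implicit-GEMM view of the convolution: for
// fprop the equivalent GEMM has M = N*Z*P*Q, N = K and K = T*R*S*C (dgrad and wgrad
// permute these roles), and bytes/flops are counted on the A, B and C operands of that GEMM.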
/// Total number of bytes loaded
int64_t Conv3dOperationProfiler::Conv3dProblem::bytes(library::ConvDescription const &operation_desc) const {
cutlass::gemm::GemmCoord mnk = eq_gemm_size(operation_desc.conv_kind);
// Input bytes read and Output bytes written for the gemm problem
int64_t bytes_ =
int64_t(library::sizeof_bits(operation_desc.A.element) * mnk.m() / 8) * mnk.k() +
int64_t(library::sizeof_bits(operation_desc.B.element) * mnk.n() / 8) * mnk.k() +
int64_t(library::sizeof_bits(operation_desc.C.element) * mnk.m() / 8) * mnk.n();
// Set is_beta_zero true if beta is zero
bool is_beta_zero = std::all_of(beta.begin(), beta.end(), [](uint8_t i) { return i==0; });
// Output bytes read for the gemm problem for non-zero beta values
if (!is_beta_zero) {
bytes_ += int64_t(library::sizeof_bits(operation_desc.C.element) * mnk.m() / 8) * mnk.n();
}
return bytes_;
}
/// Total number of flops computed
int64_t Conv3dOperationProfiler::Conv3dProblem::flops(
library::ConvDescription const &operation_desc) const {
cutlass::gemm::GemmCoord mnk = eq_gemm_size(operation_desc.conv_kind);
int64_t flops_mainloop_ = int64_t(mnk.m()) * mnk.n() * mnk.k() * 2;
int64_t flops_epilogue_ = int64_t(mnk.m()) * int64_t(mnk.n()) * 2;
// Adjust mainloop flop for dgrad strided
if (operation_desc.conv_kind == library::ConvKind::kDgrad) {
flops_mainloop_ = flops_mainloop_ / ( stride_d * stride_h * stride_w);
}
return (flops_mainloop_ + flops_epilogue_);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Extracts the problem dimensions
Status Conv3dOperationProfiler::initialize_configuration(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
library::ConvDescription const &operation_desc =
static_cast<library::ConvDescription const &>(operation->description());
if (!arg_as_int(problem_.n, "n", problem_space, problem)) {
// default value
problem_.n = 1;
}
if (!arg_as_int(problem_.d, "d", problem_space, problem)) {
// default value
problem_.d = 8;
}
if (!arg_as_int(problem_.h, "h", problem_space, problem)) {
// default value
problem_.h = 14;
}
if (!arg_as_int(problem_.w, "w", problem_space, problem)) {
// default value
problem_.w = 14;
}
if (!arg_as_int(problem_.c, "c", problem_space, problem)) {
// default value
problem_.c = 32;
}
if (!arg_as_int(problem_.k, "k", problem_space, problem)) {
// default value
problem_.k = 32;
}
if (!arg_as_int(problem_.t, "t", problem_space, problem)) {
// default value
problem_.t = 3;
}
if (!arg_as_int(problem_.r, "r", problem_space, problem)) {
// default value
problem_.r = 3;
}
if (!arg_as_int(problem_.s, "s", problem_space, problem)) {
// default value
problem_.s = 3;
}
if (!arg_as_int(problem_.pad_d, "pad_d", problem_space, problem)) {
// default value
problem_.pad_d = 1;
}
if (!arg_as_int(problem_.pad_w, "pad_w", problem_space, problem)) {
// default value
problem_.pad_w = 1;
}
if (!arg_as_int(problem_.pad_h, "pad_h", problem_space, problem)) {
// default value
problem_.pad_h = 1;
}
if (!arg_as_int(problem_.stride_d, "stride_d", problem_space, problem)) {
// default value
problem_.stride_d = 1;
}
if (!arg_as_int(problem_.stride_h, "stride_h", problem_space, problem)) {
// default value
problem_.stride_h = 1;
}
if (!arg_as_int(problem_.stride_w, "stride_w", problem_space, problem)) {
// default value
problem_.stride_w = 1;
}
if (!arg_as_int(problem_.dilation_d, "dilation_d", problem_space, problem)) {
// default value
problem_.dilation_d = 1;
}
if (!arg_as_int(problem_.dilation_h, "dilation_h", problem_space, problem)) {
// default value
problem_.dilation_h = 1;
}
if (!arg_as_int(problem_.dilation_w, "dilation_w", problem_space, problem)) {
// default value
problem_.dilation_w = 1;
}
//////////////////////// Convolution output dimensions p and q ////////////////////////
  // Cutlass convolutions support arbitrary output sizes and are not constrained by  //
  // the input, filter, padding, striding, and dilation sizes.                        //
// cuDNN sets the output dimensions (p, q) using following equations: //
// //
// output = div_up(input + 2 * pad - ((filter - 1) * dilation + 1) + 1, stride) //
// where; div_up(a, b) : (a - 1)/b + 1 //
// //
// Thus, when output p and q dimensions are unspecified by the user //
// cutlass profiler sets p and q which are cuDNN compliant. //
// //
////////////////////////////////////////////////////////////////////////////////////////
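  // Worked example with the defaults above (h = 14, pad_h = 1, r = 3, dilation_h = 1,
  // stride_h = 1): p = (14 + 2*1 - ((3 - 1)*1 + 1)) / 1 + 1 = 14, i.e. "same" padding;
  // z and q follow from (d, t, pad_d, ...) and (w, s, pad_w, ...) in the same way.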
// set convolution output z
if (!arg_as_int(problem_.z, "z", problem_space, problem)) {
    // default value (set using cudnn formula for output depth, when z is not provided)
problem_.z = (
problem_.d +
2 * problem_.pad_d -
((problem_.t - 1) * problem_.dilation_d + 1)
) / (problem_.stride_d)
+ 1;
}
// set convolution output p
if (!arg_as_int(problem_.p, "p", problem_space, problem)) {
// default value (set using cudnn formula for output height, when p is not provided)
problem_.p = (
problem_.h +
2 * problem_.pad_h -
((problem_.r - 1) * problem_.dilation_h + 1)
) / (problem_.stride_h)
+ 1;
}
// set convolution output q
if (!arg_as_int(problem_.q, "q", problem_space, problem)) {
// default value (set using cudnn formula for output width, when q is not provided)
problem_.q = (
problem_.w +
2 * problem_.pad_w -
((problem_.s - 1) * problem_.dilation_w + 1)
) / (problem_.stride_w)
+ 1;
}
/////////////////////////////////////////////////////////////////////////////////////////
if (!arg_as_SplitKModeID(problem_.split_k_mode, "split_k_mode", problem_space, problem)) {
// default value
problem_.split_k_mode = library::SplitKMode::kSerial;
}
if (!arg_as_int(problem_.split_k_slices, "split_k_slices", problem_space, problem)) {
// default value
problem_.split_k_slices = 1;
}
if (!arg_as_ConvModeID(problem_.conv_mode, "conv_mode", problem_space, problem)) {
// default value
problem_.conv_mode = library::ConvModeID::kCrossCorrelation;
}
if (!arg_as_ProviderID(problem_.eq_gemm_provider, "eq_gemm_provider", problem_space, problem)) {
// default value
problem_.eq_gemm_provider = library::Provider::kNone;
}
if (!conv_kind_satisfies(operation_desc.conv_kind, "conv_kind", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!iterator_algorithm_satisfies(operation_desc.iterator_algorithm, "iterator_algorithm", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.activation(), "Activation", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.filter(), "Filter", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.output(), "Output", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!arg_as_scalar(
problem_.alpha,
operation_desc.element_epilogue,
"alpha",
problem_space,
problem)) {
if (!cast_from_double(problem_.alpha, operation_desc.element_epilogue, 1)) {
return Status::kErrorInternal;
}
}
if (!arg_as_scalar(
problem_.beta,
operation_desc.element_epilogue,
"beta",
problem_space,
problem)) {
if (!cast_from_double(problem_.beta, operation_desc.element_epilogue, 0)) {
return Status::kErrorInternal;
}
}
// initialize library::ConvConfiguration
conv_workspace_.configuration.problem_size = conv::Conv3dProblemSize(
int(problem_.n),
int(problem_.d),
int(problem_.h),
int(problem_.w),
int(problem_.c),
int(problem_.k),
int(problem_.t),
int(problem_.r),
int(problem_.s),
int(problem_.z),
int(problem_.p),
int(problem_.q),
int(problem_.pad_d),
int(problem_.pad_h),
int(problem_.pad_w),
int(problem_.stride_d),
int(problem_.stride_h),
int(problem_.stride_w),
int(problem_.dilation_d),
int(problem_.dilation_h),
int(problem_.dilation_w),
static_cast<conv::Mode>(static_cast<int>(problem_.conv_mode)),
int(problem_.split_k_slices),
1 // groups
);
conv_workspace_.configuration.split_k_mode = static_cast<conv::SplitKMode>(static_cast<int>(problem_.split_k_mode));
conv_workspace_.configuration.layout_activations.stride() = make_Coord(
int(problem_.c),
int(problem_.w) * int(problem_.c),
int(problem_.h) * int(problem_.w) * int(problem_.c),
int(problem_.d) * int(problem_.h) * int(problem_.w) * int(problem_.c)
);
conv_workspace_.configuration.layout_filters.stride() = make_Coord(
int(problem_.c),
int(problem_.s) * int(problem_.c),
int(problem_.r) * int(problem_.s) * int(problem_.c),
int(problem_.t) * int(problem_.r) * int(problem_.s) * int(problem_.c)
);
conv_workspace_.configuration.layout_output.stride() = make_Coord(
int(problem_.k),
int(problem_.q) * int(problem_.k),
int(problem_.q) * int(problem_.p) * int(problem_.k),
int(problem_.z) * int(problem_.q) * int(problem_.p) * int(problem_.k)
);
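// The stride vectors above describe fully packed NDHWC (activations), KTRSC (filters), and
// NZPQK (output) layouts. As an illustration, for an activation extent of (n, d, h, w, c) =
// (1, 8, 14, 14, 32), the strides along (w, h, d, n) come out to
// (32, 14*32, 14*14*32, 8*14*14*32) = (32, 448, 6272, 50176).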
// initialize library::ConvArguments
conv_workspace_.arguments.A = nullptr;
conv_workspace_.arguments.B = nullptr;
conv_workspace_.arguments.C = nullptr;
conv_workspace_.arguments.D = nullptr;
conv_workspace_.arguments.alpha = problem_.alpha.data();
conv_workspace_.arguments.beta = problem_.beta.data();
conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
// initialize reduction operation for parallel split-k mode
if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
if(!initialize_reduction_configuration_(options, report, device_context, operation, problem_space, problem)) {
return Status::kErrorInternal;
}
}
initialize_result_(this->model_result_, options, operation_desc, problem_space);
return operation->can_implement(&conv_workspace_.configuration, &conv_workspace_.arguments);
}
/// Initializes the performance result
void Conv3dOperationProfiler::initialize_result_(
PerformanceResult &result,
Options const &options,
library::ConvDescription const &operation_desc,
ProblemSpace const &problem_space) {
result.provider = library::Provider::kCUTLASS;
result.disposition = Disposition::kNotRun;
result.status = Status::kSuccess;
result.operation_name = operation_desc.name;
result.arguments.resize(problem_space.rank());
set_argument(result, "Activation", problem_space,
std::string(library::to_string(operation_desc.activation().element))
+ ":" + library::to_string(operation_desc.activation().layout));
set_argument(result, "Filter", problem_space,
std::string(library::to_string(operation_desc.filter().element))
+ ":" + library::to_string(operation_desc.filter().layout));
set_argument(result, "Output", problem_space,
std::string(library::to_string(operation_desc.output().element))
+ ":" + library::to_string(operation_desc.output().layout));
set_argument(result, "conv_kind", problem_space, library::to_string(operation_desc.conv_kind));
set_argument(result, "iterator_algorithm", problem_space, std::string(library::to_string(operation_desc.iterator_algorithm)));
set_argument(result, "n", problem_space, problem_.n);
set_argument(result, "d", problem_space, problem_.d);
set_argument(result, "h", problem_space, problem_.h);
set_argument(result, "w", problem_space, problem_.w);
set_argument(result, "c", problem_space, problem_.c);
set_argument(result, "k", problem_space, problem_.k);
set_argument(result, "t", problem_space, problem_.t);
set_argument(result, "r", problem_space, problem_.r);
set_argument(result, "s", problem_space, problem_.s);
set_argument(result, "z", problem_space, problem_.z);
set_argument(result, "p", problem_space, problem_.p);
set_argument(result, "q", problem_space, problem_.q);
set_argument(result, "pad_d", problem_space, problem_.pad_d);
set_argument(result, "pad_h", problem_space, problem_.pad_h);
set_argument(result, "pad_w", problem_space, problem_.pad_w);
set_argument(result, "stride_d", problem_space, problem_.stride_d);
set_argument(result, "stride_h", problem_space, problem_.stride_h);
set_argument(result, "stride_w", problem_space, problem_.stride_w);
set_argument(result, "dilation_d", problem_space, problem_.dilation_d);
set_argument(result, "dilation_h", problem_space, problem_.dilation_h);
set_argument(result, "dilation_w", problem_space, problem_.dilation_w);
set_argument(result, "split_k_mode", problem_space,
std::string(library::to_string(problem_.split_k_mode)));
set_argument(result, "split_k_slices", problem_space, problem_.split_k_slices);
set_argument(result, "conv_mode", problem_space,
std::string(library::to_string(problem_.conv_mode)));
set_argument(result, "alpha", problem_space,
library::lexical_cast(problem_.alpha, operation_desc.element_epilogue));
set_argument(result, "beta", problem_space,
library::lexical_cast(problem_.beta, operation_desc.element_epilogue));
set_argument(result, "eq_gemm_provider", problem_space,
std::string(library::to_string(problem_.eq_gemm_provider)));
OperationProfiler::initialize_result_(result, operation_desc, problem_space);
// Bytes of activation, filter, and output tensors
result.bytes = problem_.bytes(operation_desc);
// Theoretical flops required for the computation
result.flops = problem_.flops(operation_desc);
// Measured runtime
result.runtime = 0;
}
/// Initialize reduction problem dimensions and library::Operation
bool Conv3dOperationProfiler::initialize_reduction_configuration_(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
library::ConvDescription const &conv_desc =
static_cast<library::ConvDescription const &>(operation->description());
library::ConvKind const &conv_kind = conv_desc.conv_kind;
if (!cast_from_double(problem_.alpha_one, conv_desc.element_epilogue, 1)) {
return false;
}
if (!cast_from_double(problem_.beta_zero, conv_desc.element_epilogue, 0)) {
return false;
}
/// This chooses the appropriate stride element of the row-major C tensor.
int const & tensor_c_stride_idx = (conv_kind == library::ConvKind::kWgrad ? 3 : 0);
/// initialize library::ReductionConfiguration
conv_workspace_.reduction_configuration.problem_size = problem_.eq_gemm_size(conv_kind).mn();
conv_workspace_.reduction_configuration.partitions = int(problem_.split_k_slices);
conv_workspace_.reduction_configuration.partition_stride = problem_.eq_gemm_size(conv_kind).mn().product();
conv_workspace_.reduction_configuration.ldw = conv_workspace_.configuration.layout_c(conv_kind).stride()[tensor_c_stride_idx];
conv_workspace_.reduction_configuration.lds = conv_workspace_.configuration.layout_c(conv_kind).stride()[tensor_c_stride_idx];
conv_workspace_.reduction_configuration.ldd = conv_workspace_.configuration.layout_c(conv_kind).stride()[tensor_c_stride_idx];
// find reduction operation
library::ReductionFunctionalKey reduction_key(
library::Provider::kCUTLASS,
conv_desc.tile_description.math_instruction.element_accumulator, // element workspace
conv_desc.tile_description.math_instruction.element_accumulator, // element accumulator
conv_desc.C.element, // element output
conv_desc.element_epilogue // element compute
);
#if 0// debug print to check which reduction instance is selected
std::cout << reduction_key << "\n";
#endif
auto reduction_it = Singleton::get().operation_table.reduction_operations.find(reduction_key);
if(reduction_it == Singleton::get().operation_table.reduction_operations.end()) {
return false;
}
// initialize reduction operation required for parallel split-k conv3d operator
reduction_op_ = reduction_it->second;
// reduction operation found and initialized
return true;
}
/// Initializes workspace
Status Conv3dOperationProfiler::initialize_workspace(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
// initialize the underlying conv3d operation to handle parallel reduction
library::Operation const* underlying_operation = operation;
if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
if (!(underlying_operation = library::find_conv_operation_for_parallel_reduction(operation))) {
return Status::kErrorNotSupported;
}
}
library::ConvDescription const &operation_desc =
static_cast<library::ConvDescription const &>(underlying_operation->description());
// Compute the number of copies of the problem to avoid L2 camping.
if (!options.profiling.workspace_count) {
int64_t bytes = problem_.bytes(operation_desc);
if (bytes < 3 * int64_t(options.device.properties.l2CacheSize)) {
conv_workspace_.problem_count =
1 + int((3 * int64_t(options.device.properties.l2CacheSize)) / bytes);
}
else {
conv_workspace_.problem_count = 1;
}
}
else {
conv_workspace_.problem_count = options.profiling.workspace_count;
}
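// Example of the sizing logic above: with a 40 MiB L2 cache and a problem touching 16 MiB of
// tensor data, problem_count = 1 + (3 * 40 MiB) / (16 MiB) = 1 + 7 = 8 rotating copies, whereas
// a problem larger than three times the L2 size is profiled with a single copy.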
if (options.execution_mode != ExecutionMode::kDryRun) {
conv_workspace_.A = device_context.allocate_tensor(
options,
"A",
operation_desc.A.element,
operation_desc.A.layout,
problem_.extent_a(operation_desc.conv_kind),
conv_workspace_.stride_a(operation_desc.conv_kind),
conv_workspace_.problem_count
);
conv_workspace_.B = device_context.allocate_tensor(
options,
"B",
operation_desc.B.element,
operation_desc.B.layout,
problem_.extent_b(operation_desc.conv_kind),
conv_workspace_.stride_b(operation_desc.conv_kind),
conv_workspace_.problem_count
);
conv_workspace_.C = device_context.allocate_tensor(
options,
"C",
operation_desc.C.element,
operation_desc.C.layout,
problem_.extent_c(operation_desc.conv_kind),
conv_workspace_.stride_c(operation_desc.conv_kind),
conv_workspace_.problem_count
);
conv_workspace_.Computed = device_context.allocate_tensor(
"D",
operation_desc.C.element,
operation_desc.C.layout,
problem_.extent_c(operation_desc.conv_kind),
conv_workspace_.stride_c(operation_desc.conv_kind),
conv_workspace_.problem_count
);
conv_workspace_.Reference = device_context.allocate_tensor(
"Reference",
operation_desc.C.element,
operation_desc.C.layout,
problem_.extent_c(operation_desc.conv_kind),
conv_workspace_.stride_c(operation_desc.conv_kind),
conv_workspace_.problem_count
);
}
//
// Initialize the CUTLASS operation
//
Status status = Status::kSuccess;
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
if (options.execution_mode != ExecutionMode::kDryRun) {
uint64_t workspace_size = underlying_operation->get_host_workspace_size(&conv_workspace_.configuration);
conv_workspace_.host_workspace.resize(workspace_size, 0);
workspace_size = underlying_operation->get_device_workspace_size(&conv_workspace_.configuration);
conv_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size);
status = underlying_operation->initialize(
&conv_workspace_.configuration,
conv_workspace_.host_workspace.data(),
conv_workspace_.device_workspace.data());
if (status != Status::kSuccess) {
return status;
}
if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
workspace_size = reduction_op_->get_host_workspace_size(&conv_workspace_.reduction_configuration);
conv_workspace_.reduction_host_workspace.resize(workspace_size, 0);
status = reduction_op_->initialize(
&conv_workspace_.reduction_configuration,
conv_workspace_.reduction_host_workspace.data(),
nullptr);
if (status != Status::kSuccess) {
return status;
}
}
}
//
// If CUTLASS is enabled, generate a result for it
//
results_.push_back(model_result_);
results_.back().provider = library::Provider::kCUTLASS;
results_.back().op_kind = library::OperationKind::kConv3d;
results_.back().disposition = Disposition::kNotRun;
for(auto provider : verification_providers_) {
results_.back().verification_map[provider] = Disposition::kNotRun;
}
}
return status;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against references
bool Conv3dOperationProfiler::verify_cutlass(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
return true;
}
if (options.execution_mode == ExecutionMode::kDryRun) {
return true;
}
hipError_t result;
// Initialize structure containing Conv arguments
set_cutlass_operator_arguments_();
conv_workspace_.Computed->copy_from_device(conv_workspace_.C->data());
//
// Run the CUTLASS operation
//
// initialize the underlying conv3d operation to handle parallel reduction
library::Operation const* underlying_operation = operation;
if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
if (!(underlying_operation = library::find_conv_operation_for_parallel_reduction(operation))) {
results_.back().disposition = Disposition::kFailed;
return false;
}
}
#if 0
std::cout << "profiling : " << std::endl
<< "conv2d : " << operation->description().name << std::endl
<< "underlying conv2d : " << underlying_operation->description().name << std::endl
<< "reduction : " << reduction_op_->description().name << std::endl;
#endif
// run the cutlass conv3d operation
results_.back().status = underlying_operation->run(
&conv_workspace_.arguments,
conv_workspace_.host_workspace.data(),
conv_workspace_.device_workspace.data());
if (results_.back().status != Status::kSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
// Run parallel reduction kernel for parallel split_k_mode
if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
results_.back().status = reduction_op_->run(
&conv_workspace_.reduction_arguments,
conv_workspace_.reduction_host_workspace.data(),
nullptr);
if (results_.back().status != Status::kSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
}
// Synchronize before running device reference
result = hipDeviceSynchronize();
if (result != hipSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
// CUTLASS op ran but has not yet been verified against any verification provider
results_.back().disposition = Disposition::kNotVerified;
//
// Run verification providers
//
if (options.verification.enabled) {
#if CUTLASS_ENABLE_CUDNN
// Run verification cudnn reference
if (options.verification.provider_enabled(library::Provider::kCUDNN)) {
// Guard against unsupported cases
auto const & conv_desc = static_cast<library::ConvDescription const &>(operation->description());
Status status = cudnn_satisfies(conv_desc, conv_workspace_.configuration);
// Initialize reference data to the source data
conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data());
if (status == Status::kSuccess) {
// call cudnn verification if supported
verify_with_cudnn_(
options,
report,
device_context,
operation,
problem_space,
problem);
}
else if (status == Status::kErrorInvalidProblem) {
results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kInvalidProblem;
}
else {
// set verification map for cudnn to not supported
results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kNotSupported;
}
}
#endif // #if CUTLASS_ENABLE_CUDNN
// Run verification host reference
if (options.verification.provider_enabled(library::Provider::kReferenceHost)) {
// Restore reference data back to initial source data
conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data());
verify_with_host_reference_(
options,
report,
device_context,
operation,
problem_space,
problem);
}
// Update disposition to worst case verification outcome among all
// verification providers which are supported
bool is_any_verification_run_passed = false;
for(auto &m : results_.back().verification_map) {
if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) {
results_.back().disposition = m.second;
return true;
}
if(!is_any_verification_run_passed && m.second == Disposition::kPassed) {
is_any_verification_run_passed = true;
}
}
if(is_any_verification_run_passed) {
results_.back().disposition = Disposition::kPassed;
}
}
// Returning true means continue profiling
return true;
}
/// Verifies CUTLASS against host reference
bool Conv3dOperationProfiler::verify_with_host_reference_(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
Status status;
//
// Find host reference operation using conv functional description key
//
library::OperationDescription const &desc = operation->description();
auto &conv_desc = static_cast<library::ConvDescription const &>(desc);
library::ConvFunctionalKey conv_key(
library::Provider::kReferenceHost,
conv_desc.conv_kind,
conv_desc.A.element,
conv_desc.A.layout,
conv_desc.B.element,
conv_desc.B.layout,
conv_desc.C.element,
conv_desc.C.layout,
conv_desc.tile_description.math_instruction.element_accumulator,
conv_desc.element_epilogue);
#if 0 // debug print to check which host reference instance is selected
std::cout << conv_key << "\n";
#endif
auto operators_it = Singleton::get().operation_table.conv3d_operations.find(conv_key);
if(operators_it == Singleton::get().operation_table.conv3d_operations.end()) {
results_.back().verification_map[library::Provider::kReferenceHost] = Disposition::kNotRun;
return true;
}
// conv3d host reference minimum cc is 0 (CPU) and no iterator algorithm
library::ConvPreferenceKey preference_key(0, library::IteratorAlgorithmID::kNone);
auto cc_it = operators_it->second.find(preference_key);
if(cc_it == operators_it->second.end()) {
results_.back().verification_map[library::Provider::kReferenceHost] = Disposition::kNotRun;
return true;
}
// host reference has only one instance in ConvOperationVectorMap
library::Operation const *reference_op = cc_it->second[0];
//
// Copy input tensors A, B, and C from device to host buffers
//
conv_workspace_.host_tensor_a.resize(conv_workspace_.A->bytes());
conv_workspace_.host_tensor_b.resize(conv_workspace_.B->bytes());
conv_workspace_.host_tensor_c.resize(conv_workspace_.C->bytes());
conv_workspace_.A->copy_to_host(conv_workspace_.host_tensor_a.data());
conv_workspace_.B->copy_to_host(conv_workspace_.host_tensor_b.data());
conv_workspace_.C->copy_to_host(conv_workspace_.host_tensor_c.data());
//
// Initialize structure containing Conv3d arguments
//
conv_workspace_.arguments.A = conv_workspace_.host_tensor_a.data();
conv_workspace_.arguments.B = conv_workspace_.host_tensor_b.data();
conv_workspace_.arguments.C = conv_workspace_.host_tensor_c.data();
conv_workspace_.arguments.D = conv_workspace_.host_tensor_c.data();
conv_workspace_.arguments.alpha = problem_.alpha.data();
conv_workspace_.arguments.beta = problem_.beta.data();
conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
//
// Initialize host reference operation
//
std::vector<uint8_t> host_workspace_reference_op;
uint64_t workspace_size = reference_op->get_host_workspace_size(&conv_workspace_.configuration);
host_workspace_reference_op.resize(workspace_size, 0);
reference_op->initialize(
&conv_workspace_.configuration,
host_workspace_reference_op.data());
//
// Run host reference operation
//
status = reference_op->run(
&conv_workspace_.arguments,
host_workspace_reference_op.data());
// Handle errors
if (status != Status::kSuccess) {
results_.back().verification_map[library::Provider::kReferenceHost] = Disposition::kNotVerified;
return true;
}
//
// Copy host reference output to device memory for equality check on device
//
conv_workspace_.Reference->copy_from_host(conv_workspace_.arguments.D);
//
// Verify results
//
results_.back().verification_map[library::Provider::kReferenceHost] = compare_tensors(
options,
*conv_workspace_.Computed,
*conv_workspace_.Reference,
conv_workspace_.Computed->batch_stride()
);
// Save workspace if incorrect
if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
results_.back().verification_map[library::Provider::kReferenceHost] == Disposition::kIncorrect) {
save_workspace(
device_context,
options,
static_cast<library::ConvDescription const &>(operation->description()),
library::Provider::kCUTLASS,
library::Provider::kReferenceHost);
}
// Returning true means continue profiling
return true;
}
/// Verifies CUTLASS against device reference
bool Conv3dOperationProfiler::verify_with_device_reference_(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
// TODO: verify cutlass conv3d against device reference
// Returning true means continue profiling
return true;
}
/// Measures performance results
bool Conv3dOperationProfiler::profile(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
set_cutlass_operator_arguments_();
results_.back().status = profile_cutlass_(
results_.back().runtime,
options,
operation,
&conv_workspace_.arguments,
conv_workspace_.host_workspace.data(),
conv_workspace_.device_workspace.data()
);
}
return true;
}
/// Updates the arguments structure for the CUTLASS operator based on
/// the problem index.
void Conv3dOperationProfiler::set_cutlass_operator_arguments_(int problem_idx) {
// Initialize structure containing Conv3d arguments
conv_workspace_.arguments.A = conv_workspace_.A->batch_data(problem_idx);
conv_workspace_.arguments.B = conv_workspace_.B->batch_data(problem_idx);
conv_workspace_.arguments.C = conv_workspace_.C->batch_data(problem_idx);
conv_workspace_.arguments.D = conv_workspace_.Computed->batch_data(problem_idx);
conv_workspace_.arguments.alpha = problem_.alpha.data();
conv_workspace_.arguments.beta = problem_.beta.data();
conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
// update library::ConvArguments for parallel split-k reduction
conv_workspace_.arguments.D = conv_workspace_.device_workspace.data();
conv_workspace_.arguments.alpha = problem_.alpha_one.data();
conv_workspace_.arguments.beta = problem_.beta_zero.data();
/// initialize library::ReductionArguments
conv_workspace_.reduction_arguments.workspace = conv_workspace_.device_workspace.data();
conv_workspace_.reduction_arguments.source = conv_workspace_.C->batch_data(problem_idx);
conv_workspace_.reduction_arguments.destination = conv_workspace_.Computed->batch_data(problem_idx);
conv_workspace_.reduction_arguments.alpha = problem_.alpha.data();
conv_workspace_.reduction_arguments.beta = problem_.beta.data();
conv_workspace_.reduction_arguments.pointer_mode = library::ScalarPointerMode::kHost;
}
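// In effect, parallel split-k becomes a two-pass schedule: the conv kernel writes
// split_k_slices partial accumulator tiles into device_workspace with alpha = 1 and beta = 0,
// and the reduction kernel then sums those partitions into the Computed tensor while applying
// the user-supplied alpha/beta against the original C tensor.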
}
/// Method to profile a CUTLASS Operation
Status Conv3dOperationProfiler::profile_cutlass_(
double &runtime,
Options const &options,
library::Operation const *operation,
void *arguments,
void *host_workspace,
void *device_workspace) {
GpuTimer timer;
// initialize the underlying conv3d operation to handle parallel reduction
library::Operation const* underlying_operation = operation;
if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
if (!(underlying_operation = library::find_conv_operation_for_parallel_reduction(operation))) {
return Status::kErrorNotSupported;
}
}
//
// Optional sleep to limit power consumption and thermals
//
sleep(options.profiling.sleep_duration);
//
// Warmup loop
//
Status status;
for (int iteration = 0; iteration < options.profiling.warmup_iterations; ++iteration) {
// Setup rotating workspace
int workspace_idx = options.profiling.warmup_iterations + iteration;
int problem_idx = (workspace_idx % conv_workspace_.problem_count);
set_cutlass_operator_arguments_(problem_idx);
// Run the underlying conv3d operation
status = underlying_operation->run(
arguments,
host_workspace,
device_workspace);
// Run parallel reduction kernel for parallel split_k_mode
if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
status = reduction_op_->run(
&conv_workspace_.reduction_arguments,
conv_workspace_.reduction_host_workspace.data(),
nullptr);
}
if (status != Status::kSuccess) {
return status;
}
}
//
// Initialize GPU timer
//
timer.start();
//
// Profiling loop
//
int Iterations = options.profiling.iterations;
int iteration = 0;
for (; iteration < Iterations; ++iteration) {
// Setup rotating workspace
int problem_idx = (iteration % conv_workspace_.problem_count);
set_cutlass_operator_arguments_(problem_idx);
// Run the underlying conv3d operation
status = underlying_operation->run(
arguments,
host_workspace,
device_workspace);
// Run parallel reduction kernel for parallel split_k_mode
if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
status = reduction_op_->run(
&conv_workspace_.reduction_arguments,
conv_workspace_.reduction_host_workspace.data(),
nullptr);
}
if (status != Status::kSuccess) {
return status;
}
}
//
// Wait for completion
//
timer.stop_and_wait();
//
// Update performance result
//
runtime = timer.duration(iteration);
return status;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
#if CUTLASS_ENABLE_CUDNN
/// Verifies CUTLASS against cudnn reference
bool Conv3dOperationProfiler::verify_with_cudnn_(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
auto &conv_desc = static_cast<library::ConvDescription const &>(operation->description());
//
// Construct cudnn operators
//
CudnnCreate handle;
cudnnStatus_t status = handle.get_cudnn_create_status();
if (status != CUDNN_STATUS_SUCCESS) {
results_.back().verification_map[library::Provider::kCUDNN] = get_cutlass_disposition(status);
return true;
}
//
// Initialize state
//
// Initialize structure containing Conv3d arguments
conv_workspace_.arguments.A = conv_workspace_.A->data();
conv_workspace_.arguments.B = conv_workspace_.B->data();
conv_workspace_.arguments.D = conv_workspace_.Reference->data();
conv_workspace_.arguments.alpha = problem_.alpha.data();
conv_workspace_.arguments.beta = problem_.beta.data();
conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
// cuDNN does not support four tensor arguments, so we copy the tensor C data into
// tensor D.
conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data());
conv_workspace_.arguments.C = conv_workspace_.arguments.D;
try {
//
// Construct dispatcher to cudnn operator
//
detail::cudnnConvDispatcher conv_op(
conv_desc,
conv_workspace_.configuration,
conv_workspace_.arguments,
handle
);
if (conv_op.status != Status::kSuccess) {
if (conv_op.status == Status::kErrorNotSupported) {
results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kNotSupported;
} else {
results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kFailed;
}
return true;
}
status = conv_op(handle);
// Handle errors
if (status != CUDNN_STATUS_SUCCESS) {
results_.back().verification_map[library::Provider::kCUDNN] = get_cutlass_disposition(status);
return true;
}
//
// Verify results
//
results_.back().verification_map[library::Provider::kCUDNN] = compare_tensors(
options,
*conv_workspace_.Computed,
*conv_workspace_.Reference
);
// Save workspace if incorrect
if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
results_.back().verification_map[library::Provider::kCUDNN] == Disposition::kIncorrect) {
save_workspace(
device_context,
options,
conv_desc,
library::Provider::kCUTLASS,
library::Provider::kCUDNN);
}
}
catch (...) {
results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kFailed;
}
// Returning true means continue profiling
return true;
}
#endif // #if CUTLASS_ENABLE_CUDNN
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 738c8cc8bb573c61fdc59676e2d4ad2264fdcbb5.cu | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Convolution 3D profiling
*/
#include <iostream>
#include <stdexcept>
#include <iomanip>
#include <ios>
#include "cutlass/core_io.h"
#include "conv3d_operation_profiler.h"
#include "gpu_timer.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
using namespace cutlass::library;
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Ctor
Conv3dOperationProfiler::Conv3dOperationProfiler(Options const &options):
OperationProfiler(
options,
library::OperationKind::kConv3d,
{
{ArgumentTypeID::kEnumerated, {"conv_kind"}, "Convolutional operator (fprop, dgrad, wgrad)"},
{ArgumentTypeID::kInteger, {"n", "input_n"}, "Input N dimension of the Conv3d problem space"},
{ArgumentTypeID::kInteger, {"d", "input_d"}, "Input D dimension of the Conv3d problem space"},
{ArgumentTypeID::kInteger, {"h", "input_h"}, "Input H dimension of the Conv3d problem space"},
{ArgumentTypeID::kInteger, {"w", "input_w"}, "Input W dimension of the Conv3d problem space"},
{ArgumentTypeID::kInteger, {"c", "input_c"}, "Input C dimension of the Conv3d problem space"},
{ArgumentTypeID::kInteger, {"k", "filter_k"}, "Filter K dimension of the Conv3d problem space"},
{ArgumentTypeID::kInteger, {"t", "filter_t"}, "Filter T dimension of the Conv3d problem space"},
{ArgumentTypeID::kInteger, {"r", "filter_r"}, "Filter R dimension of the Conv3d problem space"},
{ArgumentTypeID::kInteger, {"s", "filter_s"}, "Filter S dimension of the Conv3d problem space"},
{ArgumentTypeID::kInteger, {"z", "output_z"}, "Output Z dimension of the Conv3d problem space"},
{ArgumentTypeID::kInteger, {"p", "output_p"}, "Output P dimension of the Conv3d problem space"},
{ArgumentTypeID::kInteger, {"q", "output_q"}, "Output Q dimension of the Conv3d problem space"},
{ArgumentTypeID::kInteger, {"pad_d"}, "Padding in D direction"},
{ArgumentTypeID::kInteger, {"pad_h"}, "Padding in H direction"},
{ArgumentTypeID::kInteger, {"pad_w"}, "Padding in W direction"},
{ArgumentTypeID::kInteger, {"stride_d"}, "Stride in D direction"},
{ArgumentTypeID::kInteger, {"stride_h"}, "Stride in H direction"},
{ArgumentTypeID::kInteger, {"stride_w"}, "Stride in W direction"},
{ArgumentTypeID::kInteger, {"dilation_d"}, "Dilation in D direction"},
{ArgumentTypeID::kInteger, {"dilation_h"}, "Dilation in H direction"},
{ArgumentTypeID::kInteger, {"dilation_w"}, "Dilation in W direction"},
{ArgumentTypeID::kTensor, {"Activation"}, "Tensor storing the Activation operand"},
{ArgumentTypeID::kTensor, {"Filter"}, "Tensor storing the Filter operand"},
{ArgumentTypeID::kTensor, {"Output"}, "Tensor storing the Output operand"},
{ArgumentTypeID::kEnumerated, {"conv_mode"}, "Convolution filter mode (conv, cross)"},
{ArgumentTypeID::kEnumerated, {"iterator_algorithm", "iterator_algo"}, "Convolution iterator algorithm (analytic, optimized)"},
{ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"},
{ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"},
{ArgumentTypeID::kEnumerated, {"split_k_mode", "split-k-mode"}, "SplitK mode for serial or parallel reduction (serial, parallel)"},
{ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"},
{ArgumentTypeID::kEnumerated, {"eq_gemm_provider", "eq-gemm-provider"}, "Enable profiling equivalent gemm by the following providers (cutlass)"},
},
{ library::Provider::kReferenceDevice, library::Provider::kReferenceHost, library::Provider::kCUDNN }
) {
description_ = " Conv3d operation. Output(Tensor5D) = alpha * Input(Tensor5D) * Filter(Tensor5D) + beta * Input(Tensor5D)";
}
/// Destructor
Conv3dOperationProfiler::~Conv3dOperationProfiler() {
}
/// Prints usage statement for the math function
void Conv3dOperationProfiler::print_usage(std::ostream &out) const {
out << "Conv3d" << "\n\n";
OperationProfiler::print_usage(out);
}
/// Prints examples
void Conv3dOperationProfiler::print_examples(std::ostream &out) const {
out << "\nExamples:\n\n"
<< "Profile a particular convolution (specify all the convolution parameters):\n"
<< " $ cutlass_profiler --operation=Conv3d"
" --Activation=f16:ndhwc --Filter=f16:ndhwc --Output=f16 --accumulator-type=f32"
" --n=32 --d=16 --h=14 --w=14 --c=8 --k=64 --t=3 --r=3 --s=3"
" --pad_d=1 --pad_h=1 --pad_w=1"
" --stride_d=1 --stride_h=1 --stride_w=1"
" --dilation_d=1 --dilation_h=1 --dilation_w=1\n\n";
}
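// An additional, purely illustrative command line (not emitted by print_examples) that exercises
// the split-k options registered in the constructor above, e.g. for a wgrad problem with
// parallel reduction:
//
// $ cutlass_profiler --operation=Conv3d --conv_kind=wgrad
// --Activation=f16:ndhwc --Filter=f16:ndhwc --Output=f16 --accumulator-type=f32
// --n=32 --d=16 --h=14 --w=14 --c=8 --k=64 --t=3 --r=3 --s=3
// --split_k_mode=parallel --split_k_slices=4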
#if 0
// used this for debugging
static std::string byte_string(std::vector<uint8_t> const &bytes) {
std::stringstream ss;
ss << "0x";
for (size_t idx = bytes.size(); idx > 0; --idx) {
ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1));
}
return ss.str();
}
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Total number of bytes loaded
int64_t Conv3dOperationProfiler::Conv3dProblem::bytes(library::ConvDescription const &operation_desc) const {
cutlass::gemm::GemmCoord mnk = eq_gemm_size(operation_desc.conv_kind);
// Input bytes read and Output bytes written for the gemm problem
int64_t bytes_ =
int64_t(library::sizeof_bits(operation_desc.A.element) * mnk.m() / 8) * mnk.k() +
int64_t(library::sizeof_bits(operation_desc.B.element) * mnk.n() / 8) * mnk.k() +
int64_t(library::sizeof_bits(operation_desc.C.element) * mnk.m() / 8) * mnk.n();
// Set is_beta_zero true if beta is zero
bool is_beta_zero = std::all_of(beta.begin(), beta.end(), [](uint8_t i) { return i==0; });
// Output bytes read for the gemm problem for non-zero beta values
if (!is_beta_zero) {
bytes_ += int64_t(library::sizeof_bits(operation_desc.C.element) * mnk.m() / 8) * mnk.n();
}
return bytes_;
}
/// Total number of flops computed
int64_t Conv3dOperationProfiler::Conv3dProblem::flops(
library::ConvDescription const &operation_desc) const {
cutlass::gemm::GemmCoord mnk = eq_gemm_size(operation_desc.conv_kind);
int64_t flops_mainloop_ = int64_t(mnk.m()) * mnk.n() * mnk.k() * 2;
int64_t flops_epilogue_ = int64_t(mnk.m()) * int64_t(mnk.n()) * 2;
// Adjust mainloop flop for dgrad strided
if (operation_desc.conv_kind == library::ConvKind::kDgrad) {
flops_mainloop_ = flops_mainloop_ / ( stride_d * stride_h * stride_w);
}
return (flops_mainloop_ + flops_epilogue_);
}
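// Rough sizing with the profiler defaults (n=1, d=8, h=14, w=14, c=32, k=32, t=r=s=3, unit
// stride/dilation, pad=1), assuming the usual implicit-GEMM mapping for fprop
// (GEMM_M = N*Z*P*Q, GEMM_N = K, GEMM_K = T*R*S*C):
//   GEMM_M = 1*8*14*14 = 1568, GEMM_N = 32, GEMM_K = 3*3*3*32 = 864
//   flops ~ 2*M*N*K + 2*M*N = 86,704,128 + 100,352, i.e. roughly 86.8 MFLOP per run.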
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Extracts the problem dimensions
Status Conv3dOperationProfiler::initialize_configuration(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
library::ConvDescription const &operation_desc =
static_cast<library::ConvDescription const &>(operation->description());
if (!arg_as_int(problem_.n, "n", problem_space, problem)) {
// default value
problem_.n = 1;
}
if (!arg_as_int(problem_.d, "d", problem_space, problem)) {
// default value
problem_.d = 8;
}
if (!arg_as_int(problem_.h, "h", problem_space, problem)) {
// default value
problem_.h = 14;
}
if (!arg_as_int(problem_.w, "w", problem_space, problem)) {
// default value
problem_.w = 14;
}
if (!arg_as_int(problem_.c, "c", problem_space, problem)) {
// default value
problem_.c = 32;
}
if (!arg_as_int(problem_.k, "k", problem_space, problem)) {
// default value
problem_.k = 32;
}
if (!arg_as_int(problem_.t, "t", problem_space, problem)) {
// default value
problem_.t = 3;
}
if (!arg_as_int(problem_.r, "r", problem_space, problem)) {
// default value
problem_.r = 3;
}
if (!arg_as_int(problem_.s, "s", problem_space, problem)) {
// default value
problem_.s = 3;
}
if (!arg_as_int(problem_.pad_d, "pad_d", problem_space, problem)) {
// default value
problem_.pad_d = 1;
}
if (!arg_as_int(problem_.pad_w, "pad_w", problem_space, problem)) {
// default value
problem_.pad_w = 1;
}
if (!arg_as_int(problem_.pad_h, "pad_h", problem_space, problem)) {
// default value
problem_.pad_h = 1;
}
if (!arg_as_int(problem_.stride_d, "stride_d", problem_space, problem)) {
// default value
problem_.stride_d = 1;
}
if (!arg_as_int(problem_.stride_h, "stride_h", problem_space, problem)) {
// default value
problem_.stride_h = 1;
}
if (!arg_as_int(problem_.stride_w, "stride_w", problem_space, problem)) {
// default value
problem_.stride_w = 1;
}
if (!arg_as_int(problem_.dilation_d, "dilation_d", problem_space, problem)) {
// default value
problem_.dilation_d = 1;
}
if (!arg_as_int(problem_.dilation_h, "dilation_h", problem_space, problem)) {
// default value
problem_.dilation_h = 1;
}
if (!arg_as_int(problem_.dilation_w, "dilation_w", problem_space, problem)) {
// default value
problem_.dilation_w = 1;
}
//////////////////////// Convolution output dimensions p and q ////////////////////////
// Cutlass convolutions support arbitrary output sizes and are not constrained by //
// the input, filter, padding, striding, and dilation sizes. //
// cuDNN sets the output dimensions (z, p, q) using the following equations: //
// //
// output = div_up(input + 2 * pad - ((filter - 1) * dilation + 1) + 1, stride) //
// where div_up(a, b) = (a - 1)/b + 1 //
// //
// Thus, when the output z, p, and q dimensions are unspecified by the user, //
// the cutlass profiler sets z, p, and q so that they are cuDNN compliant. //
// //
////////////////////////////////////////////////////////////////////////////////////////
// set convolution output z
if (!arg_as_int(problem_.z, "z", problem_space, problem)) {
// default value (set using cudnn formula for output depth, when z is not provided)
problem_.z = (
problem_.d +
2 * problem_.pad_d -
((problem_.t - 1) * problem_.dilation_d + 1)
) / (problem_.stride_d)
+ 1;
}
// set convolution output p
if (!arg_as_int(problem_.p, "p", problem_space, problem)) {
// default value (set using cudnn formula for output height, when p is not provided)
problem_.p = (
problem_.h +
2 * problem_.pad_h -
((problem_.r - 1) * problem_.dilation_h + 1)
) / (problem_.stride_h)
+ 1;
}
// set convolution output q
if (!arg_as_int(problem_.q, "q", problem_space, problem)) {
// default value (set using cudnn formula for output width, when q is not provided)
problem_.q = (
problem_.w +
2 * problem_.pad_w -
((problem_.s - 1) * problem_.dilation_w + 1)
) / (problem_.stride_w)
+ 1;
}
/////////////////////////////////////////////////////////////////////////////////////////
if (!arg_as_SplitKModeID(problem_.split_k_mode, "split_k_mode", problem_space, problem)) {
// default value
problem_.split_k_mode = library::SplitKMode::kSerial;
}
if (!arg_as_int(problem_.split_k_slices, "split_k_slices", problem_space, problem)) {
// default value
problem_.split_k_slices = 1;
}
if (!arg_as_ConvModeID(problem_.conv_mode, "conv_mode", problem_space, problem)) {
// default value
problem_.conv_mode = library::ConvModeID::kCrossCorrelation;
}
if (!arg_as_ProviderID(problem_.eq_gemm_provider, "eq_gemm_provider", problem_space, problem)) {
// default value
problem_.eq_gemm_provider = library::Provider::kNone;
}
if (!conv_kind_satisfies(operation_desc.conv_kind, "conv_kind", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!iterator_algorithm_satisfies(operation_desc.iterator_algorithm, "iterator_algorithm", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.activation(), "Activation", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.filter(), "Filter", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.output(), "Output", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!arg_as_scalar(
problem_.alpha,
operation_desc.element_epilogue,
"alpha",
problem_space,
problem)) {
if (!cast_from_double(problem_.alpha, operation_desc.element_epilogue, 1)) {
return Status::kErrorInternal;
}
}
if (!arg_as_scalar(
problem_.beta,
operation_desc.element_epilogue,
"beta",
problem_space,
problem)) {
if (!cast_from_double(problem_.beta, operation_desc.element_epilogue, 0)) {
return Status::kErrorInternal;
}
}
// initialize library::ConvConfiguration
conv_workspace_.configuration.problem_size = conv::Conv3dProblemSize(
int(problem_.n),
int(problem_.d),
int(problem_.h),
int(problem_.w),
int(problem_.c),
int(problem_.k),
int(problem_.t),
int(problem_.r),
int(problem_.s),
int(problem_.z),
int(problem_.p),
int(problem_.q),
int(problem_.pad_d),
int(problem_.pad_h),
int(problem_.pad_w),
int(problem_.stride_d),
int(problem_.stride_h),
int(problem_.stride_w),
int(problem_.dilation_d),
int(problem_.dilation_h),
int(problem_.dilation_w),
static_cast<conv::Mode>(static_cast<int>(problem_.conv_mode)),
int(problem_.split_k_slices),
1 // groups
);
conv_workspace_.configuration.split_k_mode = static_cast<conv::SplitKMode>(static_cast<int>(problem_.split_k_mode));
conv_workspace_.configuration.layout_activations.stride() = make_Coord(
int(problem_.c),
int(problem_.w) * int(problem_.c),
int(problem_.h) * int(problem_.w) * int(problem_.c),
int(problem_.d) * int(problem_.h) * int(problem_.w) * int(problem_.c)
);
conv_workspace_.configuration.layout_filters.stride() = make_Coord(
int(problem_.c),
int(problem_.s) * int(problem_.c),
int(problem_.r) * int(problem_.s) * int(problem_.c),
int(problem_.t) * int(problem_.r) * int(problem_.s) * int(problem_.c)
);
conv_workspace_.configuration.layout_output.stride() = make_Coord(
int(problem_.k),
int(problem_.q) * int(problem_.k),
int(problem_.q) * int(problem_.p) * int(problem_.k),
int(problem_.z) * int(problem_.q) * int(problem_.p) * int(problem_.k)
);
// initialize library::ConvArguments
conv_workspace_.arguments.A = nullptr;
conv_workspace_.arguments.B = nullptr;
conv_workspace_.arguments.C = nullptr;
conv_workspace_.arguments.D = nullptr;
conv_workspace_.arguments.alpha = problem_.alpha.data();
conv_workspace_.arguments.beta = problem_.beta.data();
conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
// initialize reduction operation for parallel split-k mode
if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
if(!initialize_reduction_configuration_(options, report, device_context, operation, problem_space, problem)) {
return Status::kErrorInternal;
}
}
initialize_result_(this->model_result_, options, operation_desc, problem_space);
return operation->can_implement(&conv_workspace_.configuration, &conv_workspace_.arguments);
}
/// Initializes the performance result
void Conv3dOperationProfiler::initialize_result_(
PerformanceResult &result,
Options const &options,
library::ConvDescription const &operation_desc,
ProblemSpace const &problem_space) {
result.provider = library::Provider::kCUTLASS;
result.disposition = Disposition::kNotRun;
result.status = Status::kSuccess;
result.operation_name = operation_desc.name;
result.arguments.resize(problem_space.rank());
set_argument(result, "Activation", problem_space,
std::string(library::to_string(operation_desc.activation().element))
+ ":" + library::to_string(operation_desc.activation().layout));
set_argument(result, "Filter", problem_space,
std::string(library::to_string(operation_desc.filter().element))
+ ":" + library::to_string(operation_desc.filter().layout));
set_argument(result, "Output", problem_space,
std::string(library::to_string(operation_desc.output().element))
+ ":" + library::to_string(operation_desc.output().layout));
set_argument(result, "conv_kind", problem_space, library::to_string(operation_desc.conv_kind));
set_argument(result, "iterator_algorithm", problem_space, std::string(library::to_string(operation_desc.iterator_algorithm)));
set_argument(result, "n", problem_space, problem_.n);
set_argument(result, "d", problem_space, problem_.d);
set_argument(result, "h", problem_space, problem_.h);
set_argument(result, "w", problem_space, problem_.w);
set_argument(result, "c", problem_space, problem_.c);
set_argument(result, "k", problem_space, problem_.k);
set_argument(result, "t", problem_space, problem_.t);
set_argument(result, "r", problem_space, problem_.r);
set_argument(result, "s", problem_space, problem_.s);
set_argument(result, "z", problem_space, problem_.z);
set_argument(result, "p", problem_space, problem_.p);
set_argument(result, "q", problem_space, problem_.q);
set_argument(result, "pad_d", problem_space, problem_.pad_d);
set_argument(result, "pad_h", problem_space, problem_.pad_h);
set_argument(result, "pad_w", problem_space, problem_.pad_w);
set_argument(result, "stride_d", problem_space, problem_.stride_d);
set_argument(result, "stride_h", problem_space, problem_.stride_h);
set_argument(result, "stride_w", problem_space, problem_.stride_w);
set_argument(result, "dilation_d", problem_space, problem_.dilation_d);
set_argument(result, "dilation_h", problem_space, problem_.dilation_h);
set_argument(result, "dilation_w", problem_space, problem_.dilation_w);
set_argument(result, "split_k_mode", problem_space,
std::string(library::to_string(problem_.split_k_mode)));
set_argument(result, "split_k_slices", problem_space, problem_.split_k_slices);
set_argument(result, "conv_mode", problem_space,
std::string(library::to_string(problem_.conv_mode)));
set_argument(result, "alpha", problem_space,
library::lexical_cast(problem_.alpha, operation_desc.element_epilogue));
set_argument(result, "beta", problem_space,
library::lexical_cast(problem_.beta, operation_desc.element_epilogue));
set_argument(result, "eq_gemm_provider", problem_space,
std::string(library::to_string(problem_.eq_gemm_provider)));
OperationProfiler::initialize_result_(result, operation_desc, problem_space);
// Bytes of activation, filter, and output tensors
result.bytes = problem_.bytes(operation_desc);
// Theoretical flops required for the computation
result.flops = problem_.flops(operation_desc);
// Measured runtime
result.runtime = 0;
}
/// Initialize reduction problem dimensions and library::Operation
bool Conv3dOperationProfiler::initialize_reduction_configuration_(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
library::ConvDescription const &conv_desc =
static_cast<library::ConvDescription const &>(operation->description());
library::ConvKind const &conv_kind = conv_desc.conv_kind;
if (!cast_from_double(problem_.alpha_one, conv_desc.element_epilogue, 1)) {
return false;
}
if (!cast_from_double(problem_.beta_zero, conv_desc.element_epilogue, 0)) {
return false;
}
/// This chooses the appropriate stride element of the row-major C tensor.
int const & tensor_c_stride_idx = (conv_kind == library::ConvKind::kWgrad ? 3 : 0);
/// initialize library::ReductionConfiguration
conv_workspace_.reduction_configuration.problem_size = problem_.eq_gemm_size(conv_kind).mn();
conv_workspace_.reduction_configuration.partitions = int(problem_.split_k_slices);
conv_workspace_.reduction_configuration.partition_stride = problem_.eq_gemm_size(conv_kind).mn().product();
conv_workspace_.reduction_configuration.ldw = conv_workspace_.configuration.layout_c(conv_kind).stride()[tensor_c_stride_idx];
conv_workspace_.reduction_configuration.lds = conv_workspace_.configuration.layout_c(conv_kind).stride()[tensor_c_stride_idx];
conv_workspace_.reduction_configuration.ldd = conv_workspace_.configuration.layout_c(conv_kind).stride()[tensor_c_stride_idx];
// find reduction operation
library::ReductionFunctionalKey reduction_key(
library::Provider::kCUTLASS,
conv_desc.tile_description.math_instruction.element_accumulator, // element workspace
conv_desc.tile_description.math_instruction.element_accumulator, // element accumulator
conv_desc.C.element, // element output
conv_desc.element_epilogue // element compute
);
#if 0// debug print to check which reduction instance is selected
std::cout << reduction_key << "\n";
#endif
auto reduction_it = Singleton::get().operation_table.reduction_operations.find(reduction_key);
if(reduction_it == Singleton::get().operation_table.reduction_operations.end()) {
return false;
}
// initialize reduction operation required for parallel split-k conv3d operator
reduction_op_ = reduction_it->second;
// reduction operation found and initialized
return true;
}
/// Initializes workspace
Status Conv3dOperationProfiler::initialize_workspace(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
// initialize the underlying conv3d operation to handle parallel reduction
library::Operation const* underlying_operation = operation;
if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
if (!(underlying_operation = library::find_conv_operation_for_parallel_reduction(operation))) {
return Status::kErrorNotSupported;
}
}
library::ConvDescription const &operation_desc =
static_cast<library::ConvDescription const &>(underlying_operation->description());
// Compute the number of copies of the problem to avoid L2 camping.
if (!options.profiling.workspace_count) {
int64_t bytes = problem_.bytes(operation_desc);
if (bytes < 3 * int64_t(options.device.properties.l2CacheSize)) {
conv_workspace_.problem_count =
1 + int((3 * int64_t(options.device.properties.l2CacheSize)) / bytes);
}
else {
conv_workspace_.problem_count = 1;
}
}
else {
conv_workspace_.problem_count = options.profiling.workspace_count;
}
if (options.execution_mode != ExecutionMode::kDryRun) {
conv_workspace_.A = device_context.allocate_tensor(
options,
"A",
operation_desc.A.element,
operation_desc.A.layout,
problem_.extent_a(operation_desc.conv_kind),
conv_workspace_.stride_a(operation_desc.conv_kind),
conv_workspace_.problem_count
);
conv_workspace_.B = device_context.allocate_tensor(
options,
"B",
operation_desc.B.element,
operation_desc.B.layout,
problem_.extent_b(operation_desc.conv_kind),
conv_workspace_.stride_b(operation_desc.conv_kind),
conv_workspace_.problem_count
);
conv_workspace_.C = device_context.allocate_tensor(
options,
"C",
operation_desc.C.element,
operation_desc.C.layout,
problem_.extent_c(operation_desc.conv_kind),
conv_workspace_.stride_c(operation_desc.conv_kind),
conv_workspace_.problem_count
);
conv_workspace_.Computed = device_context.allocate_tensor(
"D",
operation_desc.C.element,
operation_desc.C.layout,
problem_.extent_c(operation_desc.conv_kind),
conv_workspace_.stride_c(operation_desc.conv_kind),
conv_workspace_.problem_count
);
conv_workspace_.Reference = device_context.allocate_tensor(
"Reference",
operation_desc.C.element,
operation_desc.C.layout,
problem_.extent_c(operation_desc.conv_kind),
conv_workspace_.stride_c(operation_desc.conv_kind),
conv_workspace_.problem_count
);
}
//
// Initialize the CUTLASS operation
//
Status status = Status::kSuccess;
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
if (options.execution_mode != ExecutionMode::kDryRun) {
uint64_t workspace_size = underlying_operation->get_host_workspace_size(&conv_workspace_.configuration);
conv_workspace_.host_workspace.resize(workspace_size, 0);
workspace_size = underlying_operation->get_device_workspace_size(&conv_workspace_.configuration);
conv_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size);
status = underlying_operation->initialize(
&conv_workspace_.configuration,
conv_workspace_.host_workspace.data(),
conv_workspace_.device_workspace.data());
if (status != Status::kSuccess) {
return status;
}
if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
workspace_size = reduction_op_->get_host_workspace_size(&conv_workspace_.reduction_configuration);
conv_workspace_.reduction_host_workspace.resize(workspace_size, 0);
status = reduction_op_->initialize(
&conv_workspace_.reduction_configuration,
conv_workspace_.reduction_host_workspace.data(),
nullptr);
if (status != Status::kSuccess) {
return status;
}
}
}
//
// If CUTLASS is enabled, generate a result for it
//
results_.push_back(model_result_);
results_.back().provider = library::Provider::kCUTLASS;
results_.back().op_kind = library::OperationKind::kConv3d;
results_.back().disposition = Disposition::kNotRun;
for(auto provider : verification_providers_) {
results_.back().verification_map[provider] = Disposition::kNotRun;
}
}
return status;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against references
bool Conv3dOperationProfiler::verify_cutlass(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
return true;
}
if (options.execution_mode == ExecutionMode::kDryRun) {
return true;
}
cudaError_t result;
// Initialize structure containing Conv arguments
set_cutlass_operator_arguments_();
conv_workspace_.Computed->copy_from_device(conv_workspace_.C->data());
//
// Run the CUTLASS operation
//
// initialize the underlying conv3d operation to handle parallel reduction
library::Operation const* underlying_operation = operation;
if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
if (!(underlying_operation = library::find_conv_operation_for_parallel_reduction(operation))) {
results_.back().disposition = Disposition::kFailed;
return false;
}
}
#if 0
std::cout << "profiling : " << std::endl
<< "conv2d : " << operation->description().name << std::endl
<< "underlying conv2d : " << underlying_operation->description().name << std::endl
<< "reduction : " << reduction_op_->description().name << std::endl;
#endif
// run cutlass conv3d operation
results_.back().status = underlying_operation->run(
&conv_workspace_.arguments,
conv_workspace_.host_workspace.data(),
conv_workspace_.device_workspace.data());
if (results_.back().status != Status::kSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
// Run parallel reduction kernel for parallel split_k_mode
if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
results_.back().status = reduction_op_->run(
&conv_workspace_.reduction_arguments,
conv_workspace_.reduction_host_workspace.data(),
nullptr);
if (results_.back().status != Status::kSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
}
// Synchronize before running device reference
result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
// CUTLASS op ran but has not yet been verified against any verification provider
results_.back().disposition = Disposition::kNotVerified;
//
// Run verification providers
//
if (options.verification.enabled) {
#if CUTLASS_ENABLE_CUDNN
// Run verification cudnn reference
if (options.verification.provider_enabled(library::Provider::kCUDNN)) {
// Guard against unsupported cases
auto const & conv_desc = static_cast<library::ConvDescription const &>(operation->description());
Status status = cudnn_satisfies(conv_desc, conv_workspace_.configuration);
// Initialize reference data to the source data
conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data());
if (status == Status::kSuccess) {
// call cudnn verification if supported
verify_with_cudnn_(
options,
report,
device_context,
operation,
problem_space,
problem);
}
else if (status == Status::kErrorInvalidProblem) {
results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kInvalidProblem;
}
else {
// set verification map for cudnn to not supported
results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kNotSupported;
}
}
#endif // #if CUTLASS_ENABLE_CUDNN
// Run verification host reference
if (options.verification.provider_enabled(library::Provider::kReferenceHost)) {
// Restore reference data back to initial source data
conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data());
verify_with_host_reference_(
options,
report,
device_context,
operation,
problem_space,
problem);
}
// Update disposition to worst case verification outcome among all
// verification providers which are supported
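// (for example, if one provider reports kPassed but another reports kIncorrect,
// the loop below returns early with disposition kIncorrect; the disposition only
// becomes kPassed when no provider fails and at least one passes)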
bool is_any_verification_run_passed = false;
for(auto &m : results_.back().verification_map) {
if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) {
results_.back().disposition = m.second;
return true;
}
if(!is_any_verification_run_passed && m.second == Disposition::kPassed) {
is_any_verification_run_passed = true;
}
}
if(is_any_verification_run_passed) {
results_.back().disposition = Disposition::kPassed;
}
}
// Return true means continue profiling
return true;
}
/// Verifies CUTLASS against host reference
bool Conv3dOperationProfiler::verify_with_host_reference_(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
Status status;
//
// Find host reference operation using conv functional description key
//
library::OperationDescription const &desc = operation->description();
auto &conv_desc = static_cast<library::ConvDescription const &>(desc);
library::ConvFunctionalKey conv_key(
library::Provider::kReferenceHost,
conv_desc.conv_kind,
conv_desc.A.element,
conv_desc.A.layout,
conv_desc.B.element,
conv_desc.B.layout,
conv_desc.C.element,
conv_desc.C.layout,
conv_desc.tile_description.math_instruction.element_accumulator,
conv_desc.element_epilogue);
#if 0 // debug print to check which host reference instance is selected
std::cout << conv_key << "\n";
#endif
auto operators_it = Singleton::get().operation_table.conv3d_operations.find(conv_key);
if(operators_it == Singleton::get().operation_table.conv3d_operations.end()) {
results_.back().verification_map[library::Provider::kReferenceHost] = Disposition::kNotRun;
return true;
}
// conv3d host reference minimum cc is 0 (CPU) and no iterator algorithm
library::ConvPreferenceKey preference_key(0, library::IteratorAlgorithmID::kNone);
auto cc_it = operators_it->second.find(preference_key);
if(cc_it == operators_it->second.end()) {
results_.back().verification_map[library::Provider::kReferenceHost] = Disposition::kNotRun;
return true;
}
// host reference has only one instance in ConvOperationVectorMap
library::Operation const *reference_op = cc_it->second[0];
//
// Copy input tensors A, B, and C from device to host buffers
//
conv_workspace_.host_tensor_a.resize(conv_workspace_.A->bytes());
conv_workspace_.host_tensor_b.resize(conv_workspace_.B->bytes());
conv_workspace_.host_tensor_c.resize(conv_workspace_.C->bytes());
conv_workspace_.A->copy_to_host(conv_workspace_.host_tensor_a.data());
conv_workspace_.B->copy_to_host(conv_workspace_.host_tensor_b.data());
conv_workspace_.C->copy_to_host(conv_workspace_.host_tensor_c.data());
//
// Initialize structure containing Conv3d arguments
//
conv_workspace_.arguments.A = conv_workspace_.host_tensor_a.data();
conv_workspace_.arguments.B = conv_workspace_.host_tensor_b.data();
conv_workspace_.arguments.C = conv_workspace_.host_tensor_c.data();
conv_workspace_.arguments.D = conv_workspace_.host_tensor_c.data();
conv_workspace_.arguments.alpha = problem_.alpha.data();
conv_workspace_.arguments.beta = problem_.beta.data();
conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
//
// Initialize host reference operation
//
std::vector<uint8_t> host_workspace_reference_op;
uint64_t workspace_size = reference_op->get_host_workspace_size(&conv_workspace_.configuration);
host_workspace_reference_op.resize(workspace_size, 0);
reference_op->initialize(
&conv_workspace_.configuration,
host_workspace_reference_op.data());
//
// Run host reference operation
//
status = reference_op->run(
&conv_workspace_.arguments,
host_workspace_reference_op.data());
// Handle errors
if (status != Status::kSuccess) {
results_.back().verification_map[library::Provider::kReferenceHost] = Disposition::kNotVerified;
return true;
}
//
// Copy host reference output to device memory for equality check on device
//
conv_workspace_.Reference->copy_from_host(conv_workspace_.arguments.D);
//
// Verify results
//
results_.back().verification_map[library::Provider::kReferenceHost] = compare_tensors(
options,
*conv_workspace_.Computed,
*conv_workspace_.Reference,
conv_workspace_.Computed->batch_stride()
);
// Save workspace if incorrect
if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
results_.back().verification_map[library::Provider::kReferenceHost] == Disposition::kIncorrect) {
save_workspace(
device_context,
options,
static_cast<library::ConvDescription const &>(operation->description()),
library::Provider::kCUTLASS,
library::Provider::kReferenceHost);
}
// Return true means continue profiling
return true;
}
/// Verifies CUTLASS against device reference
bool Conv3dOperationProfiler::verify_with_device_reference_(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
// TODO: verify cutlass conv3d against device reference
// Return true means continue profiling
return true;
}
/// Measures performance results
bool Conv3dOperationProfiler::profile(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
set_cutlass_operator_arguments_();
results_.back().status = profile_cutlass_(
results_.back().runtime,
options,
operation,
&conv_workspace_.arguments,
conv_workspace_.host_workspace.data(),
conv_workspace_.device_workspace.data()
);
}
return true;
}
/// Updates the arguments structure for the CUTLASS operator based on
/// the problem index.
void Conv3dOperationProfiler::set_cutlass_operator_arguments_(int problem_idx) {
// Initialize structure containing Conv3d arguments
conv_workspace_.arguments.A = conv_workspace_.A->batch_data(problem_idx);
conv_workspace_.arguments.B = conv_workspace_.B->batch_data(problem_idx);
conv_workspace_.arguments.C = conv_workspace_.C->batch_data(problem_idx);
conv_workspace_.arguments.D = conv_workspace_.Computed->batch_data(problem_idx);
conv_workspace_.arguments.alpha = problem_.alpha.data();
conv_workspace_.arguments.beta = problem_.beta.data();
conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
// update library::ConvArguments for parallel split-k reduction
conv_workspace_.arguments.D = conv_workspace_.device_workspace.data();
conv_workspace_.arguments.alpha = problem_.alpha_one.data();
conv_workspace_.arguments.beta = problem_.beta_zero.data();
/// initialize library::ReductionArguments
conv_workspace_.reduction_arguments.workspace = conv_workspace_.device_workspace.data();
conv_workspace_.reduction_arguments.source = conv_workspace_.C->batch_data(problem_idx);
conv_workspace_.reduction_arguments.destination = conv_workspace_.Computed->batch_data(problem_idx);
conv_workspace_.reduction_arguments.alpha = problem_.alpha.data();
conv_workspace_.reduction_arguments.beta = problem_.beta.data();
conv_workspace_.reduction_arguments.pointer_mode = library::ScalarPointerMode::kHost;
}
}
/// Method to profile a CUTLASS Operation
Status Conv3dOperationProfiler::profile_cutlass_(
double &runtime,
Options const &options,
library::Operation const *operation,
void *arguments,
void *host_workspace,
void *device_workspace) {
GpuTimer timer;
// initialize conv3d underlying operation to handle parallel reduction
library::Operation const* underlying_operation = operation;
if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
if (!(underlying_operation = library::find_conv_operation_for_parallel_reduction(operation))) {
return Status::kErrorNotSupported;
}
}
//
// Optional sleep to limit power consumption and thermals
//
sleep(options.profiling.sleep_duration);
//
// Warmup loop
//
Status status;
for (int iteration = 0; iteration < options.profiling.warmup_iterations; ++iteration) {
// Setup rotating workspace
int workspace_idx = options.profiling.warmup_iterations + iteration;
int problem_idx = (workspace_idx % conv_workspace_.problem_count);
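// e.g. with warmup_iterations = 2 and problem_count = 3, the warmup pass touches
// problem indices 2 and 0, and the timed loop further below restarts from index 0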
set_cutlass_operator_arguments_(problem_idx);
// Run underlying conv3d operation
status = underlying_operation->run(
arguments,
host_workspace,
device_workspace);
// Run parallel reduction kernel for parallel split_k_mode
if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
status = reduction_op_->run(
&conv_workspace_.reduction_arguments,
conv_workspace_.reduction_host_workspace.data(),
nullptr);
}
if (status != Status::kSuccess) {
return status;
}
}
//
// Initialize GPU timer
//
timer.start();
//
// Profiling loop
//
int Iterations = options.profiling.iterations;
int iteration = 0;
for (; iteration < Iterations; ++iteration) {
// Setup rotating workspace
int problem_idx = (iteration % conv_workspace_.problem_count);
set_cutlass_operator_arguments_(problem_idx);
// Run underlying conv3d operation
status = underlying_operation->run(
arguments,
host_workspace,
device_workspace);
// Run parallel reduction kernel for parallel split_k_mode
if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
status = reduction_op_->run(
&conv_workspace_.reduction_arguments,
conv_workspace_.reduction_host_workspace.data(),
nullptr);
}
if (status != Status::kSuccess) {
return status;
}
}
//
// Wait for completion
//
timer.stop_and_wait();
//
// Update performance result
//
runtime = timer.duration(iteration);
return status;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
#if CUTLASS_ENABLE_CUDNN
/// Verifies CUTLASS against cudnn reference
bool Conv3dOperationProfiler::verify_with_cudnn_(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
auto &conv_desc = static_cast<library::ConvDescription const &>(operation->description());
//
// Construct cudnn operators
//
CudnnCreate handle;
cudnnStatus_t status = handle.get_cudnn_create_status();
if (status != CUDNN_STATUS_SUCCESS) {
results_.back().verification_map[library::Provider::kCUDNN] = get_cutlass_disposition(status);
return true;
}
//
// Initialize state
//
// Initialize structure containing Conv3d arguments
conv_workspace_.arguments.A = conv_workspace_.A->data();
conv_workspace_.arguments.B = conv_workspace_.B->data();
conv_workspace_.arguments.D = conv_workspace_.Reference->data();
conv_workspace_.arguments.alpha = problem_.alpha.data();
conv_workspace_.arguments.beta = problem_.beta.data();
conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
// cuDNN does not support four tensor arguments, so we copy the tensor C data into
// tensor D.
conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data());
conv_workspace_.arguments.C = conv_workspace_.arguments.D;
try {
//
// Construct dispatcher to cudnn operator
//
detail::cudnnConvDispatcher conv_op(
conv_desc,
conv_workspace_.configuration,
conv_workspace_.arguments,
handle
);
if (conv_op.status != Status::kSuccess) {
if (conv_op.status == Status::kErrorNotSupported) {
results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kNotSupported;
} else {
results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kFailed;
}
return true;
}
status = conv_op(handle);
// Handle errors
if (status != CUDNN_STATUS_SUCCESS) {
results_.back().verification_map[library::Provider::kCUDNN] = get_cutlass_disposition(status);
return true;
}
//
// Verify results
//
results_.back().verification_map[library::Provider::kCUDNN] = compare_tensors(
options,
*conv_workspace_.Computed,
*conv_workspace_.Reference
);
// Save workspace if incorrect
if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
results_.back().verification_map[library::Provider::kCUDNN] == Disposition::kIncorrect) {
save_workspace(
device_context,
options,
conv_desc,
library::Provider::kCUTLASS,
library::Provider::kCUDNN);
}
}
catch (...) {
results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kFailed;
}
// Return true means continue profiling
return true;
}
#endif // #if CUTLASS_ENABLE_CUDNN
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
|
3c0a63128b638f8f2cf9d2c9e82a7ca9b6d47c9a.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/native/TensorFactories.h>
#include <ATen/hip/cub.cuh>
#include <ATen/native/hip/Randperm.cuh>
#include <limits>
namespace at {
namespace native {
// [Algorithm of randperm]
//
// randperm is implemented by sorting an arange tensor of size n with randomly
// generated keys. When random keys are different from each other, all different
// permutations have the same probability.
//
// However, there is a pitfall here:
// For better performance, these N random keys are generated independently,
// and there is no effort to make sure they are different at the time of generation.
// When two keys are identical, stable sorting algorithms will not permute these two keys.
// As a result, (0, 1) will appear more often than (1, 0).
//
// To overcome this pitfall we first carefully choose the number of bits in these keys,
// so that the probability of having duplicate keys is under a threshold. Let q be the
// threshold probability for having non-duplicate keys, then it can be proved that[1]
// the number of bits required is: ceil(log2(n - (6 n^2 + 1) / (12 log(q))))
//
// Then after sort, we launch a separate kernel that additionally shuffles any islands
// of values whose keys matched. The algorithm of this kernel is as follows:
// Each thread reads its key and the keys of its neighbors to tell if it's part of an island.
// For each island, the first thread in the island sees a key match at index i+1 but not index i-1.
// This thread considers itself the "island leader". The island leader then reads more indices to
// the right to figure out how big the island is. Most likely, the island will be very small,
// just a few values. The island leader then rolls that many RNG, uses them to additionally
// shuffle values within the island using serial Fisher-Yates, and writes them out.
//
// Reference
// [1] https://osf.io/af2hy/
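//
// Illustrative host-side sketch of the key-width rule above (not part of the
// upstream kernel; it assumes the same q = 0.9 threshold used below):
//
//   inline int randperm_required_bits(int64_t n) {
//     const double log_threshold_12 = std::log(0.9) * 12;
//     double nd = static_cast<double>(n);
//     return std::min(64, static_cast<int>(
//         std::ceil(std::log2(nd - (6 * nd * nd + 1) / log_threshold_12))));
//   }
//
// For example, n = 100000 already yields 36 bits, so the 64-bit sort path is taken.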
// The kernels are templated on an opaque, self-aligned type of the correct
// size to avoid redundant kernels for different types of the same size.
namespace {
template <int N> struct alignas(N) OpaqueType { char data[N]; };
}
Tensor& randperm_out_cuda(int64_t n, c10::optional<Generator> generator, Tensor& result) {
TORCH_CHECK(n >= 0, "n must be non-negative, got", n);
check_supported_max_int_with_precision(n, result);
result.resize_({n});
auto range = at::arange(n, result.options());
// shuffled_data points to the underlying data of the output tensor if the tensor is contiguous; otherwise it
// points to a new tensor.
Tensor shuffled;
void *shuffled_data;
if (result.is_contiguous()) {
shuffled_data = result.data_ptr();
} else {
shuffled = at::empty(n, result.options());
shuffled_data = shuffled.data_ptr();
}
auto opt = TensorOptions().device(result.device());
// See note [Algorithm of randperm]
const double log_threshold_12 = ::log(0.9) * 12;
double nd = static_cast<double>(n);
int bits = ::min(64,
static_cast<int>(::ceil(std::log2(nd - (6 * nd * nd + 1) / log_threshold_12))));
if (n == 0) {
return result;
} else if (bits <= 32) {
// For asserting device type match of the generator and result,
// we delegate that to the 'random_' function below.
auto keys = at::empty(result.sizes(), opt.dtype(kInt)).random_(
std::numeric_limits<int>::min(), std::numeric_limits<int>::max(), generator);
auto keys_tmp = at::empty_like(keys);
auto keys_out = keys_tmp.data_ptr<int>();
AT_DISPATCH_ALL_TYPES_AND(kHalf, result.scalar_type(), "randperm_out_cuda", [&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
auto shuffled_data_ = reinterpret_cast<dtype*>(shuffled_data);
dtype* range_data = reinterpret_cast<dtype*>(range.data_ptr());
at::cuda::cub::sort_pairs<int, dtype>(
keys.data_ptr<int>(), keys_out,
range_data, shuffled_data_,
n, false, 0, bits);
randperm_handle_duplicate_keys(keys_out, shuffled_data_, bits, n, generator);
});
} else {
auto keys = at::empty(result.sizes(), opt.dtype(kLong)).random_(
std::numeric_limits<int64_t>::min(), std::numeric_limits<int64_t>::max(), generator);
auto keys_tmp = at::empty_like(keys);
auto keys_out = keys_tmp.data_ptr<int64_t>();
AT_DISPATCH_ALL_TYPES_AND(kHalf, result.scalar_type(), "randperm_out_cuda", [&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
auto shuffled_data_ = reinterpret_cast<dtype*>(shuffled_data);
dtype* range_data = reinterpret_cast<dtype*>(range.data_ptr());
at::cuda::cub::sort_pairs<int64_t, dtype>(
keys.data_ptr<int64_t>(), keys_out,
range_data, shuffled_data_,
n, false, 0, bits);
randperm_handle_duplicate_keys(keys_out, shuffled_data_, bits, n, generator);
});
}
if (!result.is_contiguous()) {
result.copy_(shuffled);
}
return result;
}
}} // namespace at::native
| 3c0a63128b638f8f2cf9d2c9e82a7ca9b6d47c9a.cu | #include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/native/TensorFactories.h>
#include <ATen/cuda/cub.cuh>
#include <ATen/native/cuda/Randperm.cuh>
#include <limits>
namespace at {
namespace native {
// [Algorithm of randperm]
//
// randperm is implemented by sorting an arange tensor of size n with randomly
// generated keys. When random keys are different from each other, all different
// permutations have the same probability.
//
// However, there is a pitfall here:
// For better performance, these N random keys are generated independently,
// and there is no effort to make sure they are different at the time of generation.
// When two keys are identical, stable sorting algorithms will not permute these two keys.
// As a result, (0, 1) will appear more often than (1, 0).
//
// To overcome this pitfall we first carefully choose the number of bits in these keys,
// so that the probability of having duplicate keys is under a threshold. Let q be the
// threshold probability for having non-duplicate keys, then it can be proved that[1]
// the number of bits required is: ceil(log2(n - (6 n^2 + 1) / (12 log(q))))
//
// Then after sort, we launch a separate kernel that additionally shuffles any islands
// of values whose keys matched. The algorithm of this kernel is as follows:
// Each thread reads its key and the keys of its neighbors to tell if it's part of an island.
// For each island, the first thread in the island sees a key match at index i+1 but not index i-1.
// This thread considers itself the "island leader". The island leader then reads more indices to
// the right to figure out how big the island is. Most likely, the island will be very small,
// just a few values. The island leader then rolls that many RNG, uses them to additionally
// shuffle values within the island using serial Fisher-Yates, and writes them out.
//
// Reference
// [1] https://osf.io/af2hy/
// The kernels are templated on an opaque, self-aligned type of the correct
// size to avoid redundant kernels for different types of the same size.
namespace {
template <int N> struct alignas(N) OpaqueType { char data[N]; };
}
Tensor& randperm_out_cuda(int64_t n, c10::optional<Generator> generator, Tensor& result) {
TORCH_CHECK(n >= 0, "n must be non-negative, got", n);
check_supported_max_int_with_precision(n, result);
result.resize_({n});
auto range = at::arange(n, result.options());
// shuffled_data points to the underlying data of the output tensor if the tensor is contiguous; otherwise it
// points to a new tensor.
Tensor shuffled;
void *shuffled_data;
if (result.is_contiguous()) {
shuffled_data = result.data_ptr();
} else {
shuffled = at::empty(n, result.options());
shuffled_data = shuffled.data_ptr();
}
auto opt = TensorOptions().device(result.device());
// See note [Algorithm of randperm]
const double log_threshold_12 = std::log(0.9) * 12;
double nd = static_cast<double>(n);
int bits = std::min(64,
static_cast<int>(std::ceil(std::log2(nd - (6 * nd * nd + 1) / log_threshold_12))));
if (n == 0) {
return result;
} else if (bits <= 32) {
// For asserting device type match of the generator and result,
// we delegate that to the 'random_' function below.
auto keys = at::empty(result.sizes(), opt.dtype(kInt)).random_(
std::numeric_limits<int>::min(), std::numeric_limits<int>::max(), generator);
auto keys_tmp = at::empty_like(keys);
auto keys_out = keys_tmp.data_ptr<int>();
AT_DISPATCH_ALL_TYPES_AND(kHalf, result.scalar_type(), "randperm_out_cuda", [&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
auto shuffled_data_ = reinterpret_cast<dtype*>(shuffled_data);
dtype* range_data = reinterpret_cast<dtype*>(range.data_ptr());
at::cuda::cub::sort_pairs<int, dtype>(
keys.data_ptr<int>(), keys_out,
range_data, shuffled_data_,
n, false, 0, bits);
randperm_handle_duplicate_keys(keys_out, shuffled_data_, bits, n, generator);
});
} else {
auto keys = at::empty(result.sizes(), opt.dtype(kLong)).random_(
std::numeric_limits<int64_t>::min(), std::numeric_limits<int64_t>::max(), generator);
auto keys_tmp = at::empty_like(keys);
auto keys_out = keys_tmp.data_ptr<int64_t>();
AT_DISPATCH_ALL_TYPES_AND(kHalf, result.scalar_type(), "randperm_out_cuda", [&] {
using dtype = OpaqueType<sizeof(scalar_t)>;
auto shuffled_data_ = reinterpret_cast<dtype*>(shuffled_data);
dtype* range_data = reinterpret_cast<dtype*>(range.data_ptr());
at::cuda::cub::sort_pairs<int64_t, dtype>(
keys.data_ptr<int64_t>(), keys_out,
range_data, shuffled_data_,
n, false, 0, bits);
randperm_handle_duplicate_keys(keys_out, shuffled_data_, bits, n, generator);
});
}
if (!result.is_contiguous()) {
result.copy_(shuffled);
}
return result;
}
}} // namespace at::native
|
8c8b7b85b1a59bf083d6d73e57d8b48f34497fac.hip | // !!! This is a file automatically generated by hipify!!!
#if 1
#include "../common/book.h"
#include "../common/cpu_bitmap.h"
#include "hip/hip_runtime.h"
#include "../common/cpu_anim.h"
#define DIM 1000
struct hipComplex {
float r;
float i;
__device__ hipComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ hipComplex operator*(const hipComplex& a) {
return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ hipComplex operator+(const hipComplex& a) {
return hipComplex(r+a.r, i+a.i);
}
};
//__device__ int julia( int x, int y ) {
// const float scale = 1.5;
// float jx = scale * (float)(DIM/2 - x)/(DIM/2);
// float jy = scale * (float)(DIM/2 - y)/(DIM/2);
//
// //hipComplex c(-0.8, 0.156);
// hipComplex c(-0.8, 0.156);// image transformation
// hipComplex a(jx, jy);
//
// int i = 0;
// for (i=0; i<200; i++) {
// a = a * a + c;
// if (a.magnitude2() > 1000)
// return 0;
// }
//
// return 1;
//}
__device__ int julia( int x, int y,int ticks ) {
const float scale = 1.5;
float jx = scale * (float)(DIM/2 - x)/(DIM/2);
float jy = scale * (float)(DIM/2 - y)/(DIM/2);
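// this maps pixel coordinates onto the complex plane: x = 0 gives jx = +1.5 and
// x = DIM gives jx = -1.5, so the kernel samples the square [-1.5, 1.5] x [-1.5, 1.5]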
//angle rotation
//double radian = ticks*PI/180;
//float jx1 = jx*cos(radian)-jy*sin(radian);
//float jx2 = jx*sin(radian)+jy*cos(radian);
//hipComplex c(-0.8, 0.156);
hipComplex c(-0.8, 0.156+ticks/1000.0);// image transformation
hipComplex a(jx, jy);
int i = 0;
for (i=0; i<200; i++) {
a = a * a + c;
if (a.magnitude2() > 1000)
return 0;
}
return 1;
}// the above is the Julia set
struct DataBlock {
unsigned char *dev_bitmap; // the region to be colored
CPUAnimBitmap *bitmap;
};
__global__ void kernel( unsigned char *ptr,int ticks ) {
// map from blockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;// (horizontal) thread index within the block + threads of all preceding blocks
int y = threadIdx.y + blockIdx.y * blockDim.y;// (vertical) thread index within the block + threads of all preceding blocks
int offset = x + y * blockDim.x * gridDim.x; // convert the 2D coordinate into a 1D offset
// now calculate the value at that position
int juliaValue = julia( x, y ,ticks);
/* ptr[offset * 4 + 0] = (255-ticks*20)* juliaValue;
ptr[offset * 4 + 1] = (100+ticks*20) *juliaValue;
ptr[offset * 4 + 2] = (25+ticks*20) * juliaValue; */
ptr[offset*4 + 0] = (255) * juliaValue;
ptr[offset*4 + 1] = 0;
ptr[offset*4 + 2] = 0;
ptr[offset*4 + 3] = 255;
}
void generate_frame( DataBlock *d, int ticks ) {
dim3 blocks(DIM/8,DIM/8);
dim3 threads(8,8);
/*dim3 blocks(DIM/16,DIM/16);
dim3 threads(16,16);
*/
//dim3 grid(DIM,DIM);
//kernel<<<grid , 1>>>( d->dev_bitmap, ticks );
hipLaunchKernelGGL(( kernel), dim3(blocks),dim3(threads), 0, 0, d->dev_bitmap, ticks );
HANDLE_ERROR( hipMemcpy( d->bitmap->get_ptr(),
d->dev_bitmap,
d->bitmap->image_size(),
hipMemcpyDeviceToHost ) );
}
void cleanup( DataBlock *d ) {
HANDLE_ERROR( hipFree( d->dev_bitmap ) );
}
int main( void ) {
DataBlock data;
CPUAnimBitmap bitmap( DIM, DIM, &data );
data.bitmap = &bitmap;
HANDLE_ERROR( hipMalloc( (void**)&data.dev_bitmap,
bitmap.image_size() ) );
bitmap.anim_and_exit( (void (*)(void*,int))generate_frame,
(void (*)(void*))cleanup );
}
#endif | 8c8b7b85b1a59bf083d6d73e57d8b48f34497fac.cu | #if 1
#include "../common/book.h"
#include "../common/cpu_bitmap.h"
#include "cuda.h"
#include "../common/cpu_anim.h"
#define DIM 1000
struct cuComplex {
float r;
float i;
__device__ cuComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ cuComplex operator*(const cuComplex& a) {
return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ cuComplex operator+(const cuComplex& a) {
return cuComplex(r+a.r, i+a.i);
}
};
//__device__ int julia( int x, int y ) {
// const float scale = 1.5;
// float jx = scale * (float)(DIM/2 - x)/(DIM/2);
// float jy = scale * (float)(DIM/2 - y)/(DIM/2);
//
// //cuComplex c(-0.8, 0.156);
// cuComplex c(-0.8, 0.156);// image transformation
// cuComplex a(jx, jy);
//
// int i = 0;
// for (i=0; i<200; i++) {
// a = a * a + c;
// if (a.magnitude2() > 1000)
// return 0;
// }
//
// return 1;
//}
__device__ int julia( int x, int y,int ticks ) {
const float scale = 1.5;
float jx = scale * (float)(DIM/2 - x)/(DIM/2);
float jy = scale * (float)(DIM/2 - y)/(DIM/2);
//angle rotation
//double radian = ticks*PI/180;
//float jx1 = jx*cos(radian)-jy*sin(radian);
//float jx2 = jx*sin(radian)+jy*cos(radian);
//cuComplex c(-0.8, 0.156);
cuComplex c(-0.8, 0.156+ticks/1000.0);// image transformation
cuComplex a(jx, jy);
int i = 0;
for (i=0; i<200; i++) {
a = a * a + c;
if (a.magnitude2() > 1000)
return 0;
}
return 1;
}// the above is the Julia set
struct DataBlock {
unsigned char *dev_bitmap; // the region to be colored
CPUAnimBitmap *bitmap;
};
__global__ void kernel( unsigned char *ptr,int ticks ) {
// map from blockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;// (horizontal) thread index within the block + threads of all preceding blocks
int y = threadIdx.y + blockIdx.y * blockDim.y;// (vertical) thread index within the block + threads of all preceding blocks
int offset = x + y * blockDim.x * gridDim.x; // convert the 2D coordinate into a 1D offset
// now calculate the value at that position
int juliaValue = julia( x, y ,ticks);
/* ptr[offset * 4 + 0] = (255-ticks*20)* juliaValue;
ptr[offset * 4 + 1] = (100+ticks*20) *juliaValue;
ptr[offset * 4 + 2] = (25+ticks*20) * juliaValue; */
ptr[offset*4 + 0] = (255) * juliaValue;
ptr[offset*4 + 1] = 0;
ptr[offset*4 + 2] = 0;
ptr[offset*4 + 3] = 255;
}
void generate_frame( DataBlock *d, int ticks ) {
dim3 blocks(DIM/8,DIM/8);
dim3 threads(8,8);
/*dim3 blocks(DIM/16,DIM/16);
dim3 threads(16,16);
*/
//dim3 grid(DIM,DIM);
//kernel<<<grid , 1>>>( d->dev_bitmap, ticks );
kernel<<<blocks,threads>>>( d->dev_bitmap, ticks );
HANDLE_ERROR( cudaMemcpy( d->bitmap->get_ptr(),
d->dev_bitmap,
d->bitmap->image_size(),
cudaMemcpyDeviceToHost ) );
}
void cleanup( DataBlock *d ) {
HANDLE_ERROR( cudaFree( d->dev_bitmap ) );
}
int main( void ) {
DataBlock data;
CPUAnimBitmap bitmap( DIM, DIM, &data );
data.bitmap = &bitmap;
HANDLE_ERROR( cudaMalloc( (void**)&data.dev_bitmap,
bitmap.image_size() ) );
bitmap.anim_and_exit( (void (*)(void*,int))generate_frame,
(void (*)(void*))cleanup );
}
#endif |
fcf341b4df9c4fc57a14da1fe39f14f9609e004a.hip | // !!! This is a file automatically generated by hipify!!!
#include "mosc.h"
#include <stdio.h>
#define elec (0)
#define muon (1)
#define tau (2)
#define re (0)
#define im (1)
//#define ZERO_CP
static int matrixtype = standard_type;
/* Flag to tell us if we're doing nu_e or nu_sterile matter effects */
static NuType matterFlavor = nue_type;
static double putMix[3][3][2];
__host__ void setMatterFlavor(int flavor)
{
if (flavor == nue_type) matterFlavor = nue_type;
else if (flavor == sterile_type) matterFlavor = sterile_type;
else {
//fprintf(stderr, "setMatterFlavor: flavor=%d", flavor);
//moscerr("setMatterFlavor: Illegal flavor.");
}
}
__host__ void setmix_sin(double s12,double s23,double s13,double dcp,
double Mix[3][3][2])
{
double c12,c23,c13,sd,cd;
if ( s12>1.0 ) s12=1.0;
if ( s23>1.0 ) s23=1.0;
if ( s13>1.0 ) s13=1.0;
sd = sin( dcp );
cd = cos( dcp );
if ( cd >1.0 ) cd =1.0; /* clamp cd only after it has been assigned */
c12 = sqrt(1.0-s12*s12);
c23 = sqrt(1.0-s23*s23);
c13 = sqrt(1.0-s13*s13);
if ( matrixtype == standard_type )
{
Mix[0][0][re] = c12*c13;
Mix[0][0][im] = 0.0;
Mix[0][1][re] = s12*c13;
Mix[0][1][im] = 0.0;
Mix[0][2][re] = s13*cd;
Mix[0][2][im] = -s13*sd;
Mix[1][0][re] = -s12*c23-c12*s23*s13*cd;
Mix[1][0][im] = -c12*s23*s13*sd;
Mix[1][1][re] = c12*c23-s12*s23*s13*cd;
Mix[1][1][im] = -s12*s23*s13*sd;
Mix[1][2][re] = s23*c13;
Mix[1][2][im] = 0.0;
Mix[2][0][re] = s12*s23-c12*c23*s13*cd;
Mix[2][0][im] = -c12*c23*s13*sd;
Mix[2][1][re] = -c12*s23-s12*c23*s13*cd;
Mix[2][1][im] = -s12*c23*s13*sd;
Mix[2][2][re] = c23*c13;
Mix[2][2][im] = 0.0;
}
else
{
Mix[0][0][re] = c12;
Mix[0][0][im] = 0.0;
Mix[0][1][re] = s12*c23;
Mix[0][1][im] = 0.0;
Mix[0][2][re] = s12*s23;
Mix[0][2][im] = 0.0;
Mix[1][0][re] = -s12*c13;
Mix[1][0][im] = 0.0;
Mix[1][1][re] = c12*c13*c23+s13*s23*cd;
Mix[1][1][im] = s13*s23*sd;
Mix[1][2][re] = c12*c13*s23-s13*c23*cd;
Mix[1][2][im] = -s13*c23*sd;
Mix[2][0][re] = -s12*s13;
Mix[2][0][im] = 0.0;
Mix[2][1][re] = c12*s13*c23-c13*s23*cd;
Mix[2][1][im] = -c13*s23*sd;
Mix[2][2][re] = c12*s13*s23+c13*c23*cd;
Mix[2][2][im] = c13*c23*sd;
}
}
__host__ void setmass(double dms21, double dms23, double dmVacVac[][3])
{
double delta=5.0e-9;
double mVac[3];
mVac[0] = 0.0;
mVac[1] = dms21;
mVac[2] = dms21+dms23;
/* Break any degeneracies */
if (dms21==0.0) mVac[0] -= delta;
if (dms23==0.0) mVac[2] += delta;
dmVacVac[0][0] = dmVacVac[1][1] = dmVacVac[2][2] = 0.0;
dmVacVac[0][1] = mVac[0]-mVac[1]; dmVacVac[1][0] = -dmVacVac[0][1];
dmVacVac[0][2] = mVac[0]-mVac[2]; dmVacVac[2][0] = -dmVacVac[0][2];
dmVacVac[1][2] = mVac[1]-mVac[2]; dmVacVac[2][1] = -dmVacVac[1][2];
}
/***********************************************************************
getM
Compute the matter-mass vector M, dM = M_i-M_j,
and dMimj. type<0 means anti-neutrinos, type>0 means "real" neutrinos
***********************************************************************/
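/* Illustrative example (values chosen here only for illustration): with
Enu = 1 GeV and rho = 2.7 in the units expected by the caller, the potential
term below has magnitude fac = 1.52588e-4 * 1 * 2.7 ~= 4.12e-4, positive for
anti-neutrinos and negative for neutrinos, and it enters the cubic
coefficients alpha, beta and gamma used to extract the three roots. */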
__device__ void getM(double Enu, double rho,
double Mix[][3][2], double dmVacVac[][3], int antitype,
double dmMatMat[][3], double dmMatVac[][3])
{
int i, j, k;
double alpha, beta, gamma, fac=0.0, arg, tmp;
double alphaV, betaV, gammaV, argV, tmpV;
double theta0, theta1, theta2;
double theta0V, theta1V, theta2V;
double mMatU[3], mMatV[3], mMat[3];
double tworttwoGf = 1.52588e-4;
/* Equations (22) from Barger et al. */
/* Reverse the sign of the potential depending on neutrino type */
//if (matterFlavor == nue_type) {
/* If we're doing matter effects for electron neutrinos */
if (antitype<0) fac = tworttwoGf*Enu*rho; /* Anti-neutrinos */
else fac = -tworttwoGf*Enu*rho; /* Real-neutrinos */
//}
//else if (matterFlavor == sterile_type) {
/* If we're doing matter effects for sterile neutrinos */
//if (antitype<0) fac = -0.5*tworttwoGf*Enu*rho; /* Anti-neutrinos */
// else fac = 0.5*tworttwoGf*Enu*rho; /* Real-neutrinos */
// }
/* The strategy to sort out the three roots is to compute the vacuum
* mass the same way as the "matter" masses are computed then to sort
* the results according to the input vacuum masses
*/
alpha = fac + dmVacVac[0][1] + dmVacVac[0][2];
alphaV = dmVacVac[0][1] + dmVacVac[0][2];
#ifndef ZERO_CP
beta = dmVacVac[0][1]*dmVacVac[0][2] +
fac*(dmVacVac[0][1]*(1.0 - Mix[elec][1][re]*Mix[elec][1][re] -
Mix[elec][1][im]*Mix[elec][1][im]) +
dmVacVac[0][2]*(1.0 - Mix[elec][2][re]*Mix[elec][2][re] -
Mix[elec][2][im]*Mix[elec][2][im]));
betaV = dmVacVac[0][1]*dmVacVac[0][2];
#else
beta = dmVacVac[0][1]*dmVacVac[0][2] +
fac*(dmVacVac[0][1]*(1.0 - Mix[elec][1][re]*Mix[elec][1][re]) +
dmVacVac[0][2]*(1.0- Mix[elec][2][re]*Mix[elec][2][re]));
betaV = dmVacVac[0][1]*dmVacVac[0][2];
#endif
#ifndef ZERO_CP
gamma = fac*dmVacVac[0][1]*dmVacVac[0][2]*
(Mix[elec][0][re]*Mix[elec][0][re]+Mix[elec][0][im]*Mix[elec][0][im]);
gammaV = 0.0;
#else
gamma = fac*dmVacVac[0][1]*dmVacVac[0][2]*
(Mix[elec][0][re]*Mix[elec][0][re]);
gammaV = 0.0;
#endif
/* Compute the argument of the arc-cosine */
tmp = alpha*alpha-3.0*beta;
tmpV = alphaV*alphaV-3.0*betaV;
if (tmp<0.0) {
// fprintf(stderr, "getM: alpha^2-3*beta < 0 !\n");
tmp = 0.0;
}
/* Equation (21) */
arg = (2.0*alpha*alpha*alpha-9.0*alpha*beta+27.0*gamma)/
(2.0*sqrt(tmp*tmp*tmp));
if (fabs(arg)>1.0) arg = arg/fabs(arg);
argV = (2.0*alphaV*alphaV*alphaV-9.0*alphaV*betaV+27.0*gammaV)/
(2.0*sqrt(tmpV*tmpV*tmpV));
if (fabs(argV)>1.0) argV = argV/fabs(argV);
/* These are the three roots the paper refers to */
theta0 = acos(arg)/3.0;
theta1 = theta0-(2.0*M_PI/3.0);
theta2 = theta0+(2.0*M_PI/3.0);
theta0V = acos(argV)/3.0;
theta1V = theta0V-(2.0*M_PI/3.0);
theta2V = theta0V+(2.0*M_PI/3.0);
mMatU[0] = mMatU[1] = mMatU[2] = -(2.0/3.0)*sqrt(tmp);
mMatU[0] *= cos(theta0); mMatU[1] *= cos(theta1); mMatU[2] *= cos(theta2);
tmp = dmVacVac[0][0] - alpha/3.0;
mMatU[0] += tmp; mMatU[1] += tmp; mMatU[2] += tmp;
mMatV[0] = mMatV[1] = mMatV[2] = -(2.0/3.0)*sqrt(tmpV);
mMatV[0] *= cos(theta0V); mMatV[1] *= cos(theta1V); mMatV[2] *= cos(theta2V);
tmpV = dmVacVac[0][0] - alphaV/3.0;
mMatV[0] += tmpV; mMatV[1] += tmpV; mMatV[2] += tmpV;
/* Sort according to which reproduces the vacuum eigenstates */
for (i=0; i<3; i++) {
tmpV = fabs(dmVacVac[i][0]-mMatV[0]);
k = 0;
for (j=1; j<3; j++) {
tmp = fabs(dmVacVac[i][0]-mMatV[j]);
if (tmp<tmpV) {
k = j;
tmpV = tmp;
}
}
mMat[i] = mMatU[k];
}
for (i=0; i<3; i++) {
for (j=0; j<3; j++) {
dmMatMat[i][j] = mMat[i] - mMat[j];
dmMatVac[i][j] = mMat[i] - dmVacVac[j][0];
}
}
}
/***********************************************************************
getA
Calculate the transition amplitude matrix A (equation 10)
***********************************************************************/
__device__ void getA(double L, double E, double rho,
double Mix[][3][2], double dmMatVac[][3],
double dmMatMat[][3], int antitype, double A[3][3][2],
double phase_offset)
{
/*
DARN - looks like this is all junk...more debugging needed...
*/
//int n, m, i, j, k;
double /*fac=0.0,*/ arg, c, s;
// TCA ADDITION: set equal to 0!
double X[3][3][2] = {0.0};
double product[3][3][3][2] = {0.0};
/* (1/2)*(1/(h_bar*c)) in units of GeV/(eV^2-km) */
const double LoEfac = 2.534;
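/* e.g. for dmMatVac[k][0] = 2.5e-3 eV^2, L = 1300 km and E = 2 GeV the phase
computed below is |arg| = 2.534 * 2.5e-3 * 1300 / 2 ~= 4.1 rad (illustrative values) */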
if ( phase_offset==0.0 )
{
get_product(L, E, rho, Mix, dmMatVac, dmMatMat, antitype, product);
}
/////////////// product is JUNK /////////////
for (int i=0; i<3; i++){
for (int j=0; j<3; j++) {
//printf(" product[%d][%d]: %f, %f\n",i,j,*product[i][j][0],*product[i][j][1]);
//printf(" A[%d][%d]: %f, %f\n",i,j,A[i][j][0],A[i][j][1]);
}
}
/* Make the sum with the exponential factor */
//hipMemset(X, 0, 3*3*2*sizeof(double));
//memset(X, 0, 3*3*2*sizeof(double));
for (int k=0; k<3; k++)
{
arg = -LoEfac*dmMatVac[k][0]*L/E;
if ( k==2 ) arg += phase_offset ;
c = cos(arg);
s = sin(arg);
for (int i=0; i<3; i++)
{
for (int j=0; j<3; j++)
{
#ifndef ZERO_CP
X[i][j][re] += c*product[i][j][k][re] - s*product[i][j][k][im];
X[i][j][im] += c*product[i][j][k][im] + s*product[i][j][k][re];
#else
X[i][j][re] += c*product[i][j][k][re];
X[i][j][im] += s*product[i][j][k][re];
#endif
}
}
}
/* Compute the product with the mixing matrices */
for(int i=0; i < 3; i++)
for(int j = 0; j < 3; j++)
for(int k = 0; k < 2; k++)
A[i][j][k] = 0;
for (int n=0; n<3; n++) {
for (int m=0; m<3; m++) {
for (int i=0; i<3; i++) {
for (int j=0; j<3; j++) {
#ifndef ZERO_CP
A[n][m][re] +=
Mix[n][i][re]*X[i][j][re]*Mix[m][j][re] +
Mix[n][i][re]*X[i][j][im]*Mix[m][j][im] +
Mix[n][i][im]*X[i][j][re]*Mix[m][j][im] -
Mix[n][i][im]*X[i][j][im]*Mix[m][j][re];
//printf("\nregret %f %f %f",Mix[n][i][re], X[i][j][im], Mix[m][j][im]);
A[n][m][im] +=
Mix[n][i][im]*X[i][j][im]*Mix[m][j][im] +
Mix[n][i][im]*X[i][j][re]*Mix[m][j][re] +
Mix[n][i][re]*X[i][j][im]*Mix[m][j][re] -
Mix[n][i][re]*X[i][j][re]*Mix[m][j][im];
#else
A[n][m][re] +=
Mix[n][i][re]*X[i][j][re]*Mix[m][j][re];
A[n][m][im] +=
Mix[n][i][re]*X[i][j][im]*Mix[m][j][re];
#endif
//printf("\n %i %i %i A %f", n, m, re, A[n][m][re]);
}
}
}
}
//printf("(getA) Aout: %f\n",A[0][0][0]);
}
__device__ void get_product(double L, double E, double rho,double Mix[][3][2],
double dmMatVac[][3], double dmMatMat[][3],
int antitype,
double product[][3][3][2])
{
double fac=0.0;
double twoEHmM[3][3][3][2];
double tworttwoGf = 1.52588e-4;
/* (1/2)*(1/(h_bar*c)) in units of GeV/(eV^2-km) */
/* Reverse the sign of the potential depending on neutrino type */
//if (matterFlavor == nue_type) {
/* If we're doing matter effects for electron neutrinos */
if (antitype<0) fac = tworttwoGf*E*rho; /* Anti-neutrinos */
else fac = -tworttwoGf*E*rho; /* Real-neutrinos */
// }
/*
else if (matterFlavor == sterile_type) {
// If we're doing matter effects for sterile neutrinos
if (antitype<0) fac = -0.5*tworttwoGf*E*rho; // Anti-neutrinos
else fac = 0.5*tworttwoGf*E*rho; // Real-neutrinos
} */
/* Calculate the matrix 2EH-M_j */
for (int n=0; n<3; n++) {
for (int m=0; m<3; m++) {
#ifndef ZERO_CP
twoEHmM[n][m][0][re] =
-fac*(Mix[0][n][re]*Mix[0][m][re]+Mix[0][n][im]*Mix[0][m][im]);
twoEHmM[n][m][0][im] =
-fac*(Mix[0][n][re]*Mix[0][m][im]-Mix[0][n][im]*Mix[0][m][re]);
twoEHmM[n][m][1][re] = twoEHmM[n][m][2][re] = twoEHmM[n][m][0][re];
twoEHmM[n][m][1][im] = twoEHmM[n][m][2][im] = twoEHmM[n][m][0][im];
#else
twoEHmM[n][m][0][re] =
-fac*(Mix[0][n][re]*Mix[0][m][re]);
twoEHmM[n][m][0][im] = 0 ;
twoEHmM[n][m][1][re] = twoEHmM[n][m][2][re] = twoEHmM[n][m][0][re];
twoEHmM[n][m][1][im] = twoEHmM[n][m][2][im] = twoEHmM[n][m][0][im];
#endif
if (n==m) for (int j=0; j<3; j++)
twoEHmM[n][m][j][re] -= dmMatVac[j][n];
}
}
/* Calculate the product in eq.(10) of twoEHmM for j!=k */
for (int i=0; i<3; i++) {
for (int j=0; j<3; j++) {
for (int k=0; k<3; k++) {
#ifndef ZERO_CP
product[i][j][0][re] +=
twoEHmM[i][k][1][re]*twoEHmM[k][j][2][re] -
twoEHmM[i][k][1][im]*twoEHmM[k][j][2][im];
product[i][j][0][im] +=
twoEHmM[i][k][1][re]*twoEHmM[k][j][2][im] +
twoEHmM[i][k][1][im]*twoEHmM[k][j][2][re];
product[i][j][1][re] +=
twoEHmM[i][k][2][re]*twoEHmM[k][j][0][re] -
twoEHmM[i][k][2][im]*twoEHmM[k][j][0][im];
product[i][j][1][im] +=
twoEHmM[i][k][2][re]*twoEHmM[k][j][0][im] +
twoEHmM[i][k][2][im]*twoEHmM[k][j][0][re];
product[i][j][2][re] +=
twoEHmM[i][k][0][re]*twoEHmM[k][j][1][re] -
twoEHmM[i][k][0][im]*twoEHmM[k][j][1][im];
product[i][j][2][im] +=
twoEHmM[i][k][0][re]*twoEHmM[k][j][1][im] +
twoEHmM[i][k][0][im]*twoEHmM[k][j][1][re];
#else
product[i][j][0][re] +=
twoEHmM[i][k][1][re]*twoEHmM[k][j][2][re];
product[i][j][1][re] +=
twoEHmM[i][k][2][re]*twoEHmM[k][j][0][re];
product[i][j][2][re] +=
twoEHmM[i][k][0][re]*twoEHmM[k][j][1][re];
#endif
}
#ifndef ZERO_CP
product[i][j][0][re] /= (dmMatMat[0][1]*dmMatMat[0][2]);
product[i][j][0][im] /= (dmMatMat[0][1]*dmMatMat[0][2]);
product[i][j][1][re] /= (dmMatMat[1][2]*dmMatMat[1][0]);
product[i][j][1][im] /= (dmMatMat[1][2]*dmMatMat[1][0]);
product[i][j][2][re] /= (dmMatMat[2][0]*dmMatMat[2][1]);
product[i][j][2][im] /= (dmMatMat[2][0]*dmMatMat[2][1]);
#else
product[i][j][0][re] /= (dmMatMat[0][1]*dmMatMat[0][2]);
product[i][j][1][re] /= (dmMatMat[1][2]*dmMatMat[1][0]);
product[i][j][2][re] /= (dmMatMat[2][0]*dmMatMat[2][1]);
#endif
}
}
}
| fcf341b4df9c4fc57a14da1fe39f14f9609e004a.cu | #include "mosc.h"
#include <stdio.h>
#define elec (0)
#define muon (1)
#define tau (2)
#define re (0)
#define im (1)
//#define ZERO_CP
static int matrixtype = standard_type;
/* Flag to tell us if we're doing nu_e or nu_sterile matter effects */
static NuType matterFlavor = nue_type;
static double putMix[3][3][2];
__host__ void setMatterFlavor(int flavor)
{
if (flavor == nue_type) matterFlavor = nue_type;
else if (flavor == sterile_type) matterFlavor = sterile_type;
else {
//fprintf(stderr, "setMatterFlavor: flavor=%d", flavor);
//moscerr("setMatterFlavor: Illegal flavor.");
}
}
__host__ void setmix_sin(double s12,double s23,double s13,double dcp,
double Mix[3][3][2])
{
double c12,c23,c13,sd,cd;
if ( s12>1.0 ) s12=1.0;
if ( s23>1.0 ) s23=1.0;
if ( s13>1.0 ) s13=1.0;
sd = sin( dcp );
cd = cos( dcp );
if ( cd >1.0 ) cd =1.0; /* clamp cd only after it has been assigned */
c12 = sqrt(1.0-s12*s12);
c23 = sqrt(1.0-s23*s23);
c13 = sqrt(1.0-s13*s13);
if ( matrixtype == standard_type )
{
Mix[0][0][re] = c12*c13;
Mix[0][0][im] = 0.0;
Mix[0][1][re] = s12*c13;
Mix[0][1][im] = 0.0;
Mix[0][2][re] = s13*cd;
Mix[0][2][im] = -s13*sd;
Mix[1][0][re] = -s12*c23-c12*s23*s13*cd;
Mix[1][0][im] = -c12*s23*s13*sd;
Mix[1][1][re] = c12*c23-s12*s23*s13*cd;
Mix[1][1][im] = -s12*s23*s13*sd;
Mix[1][2][re] = s23*c13;
Mix[1][2][im] = 0.0;
Mix[2][0][re] = s12*s23-c12*c23*s13*cd;
Mix[2][0][im] = -c12*c23*s13*sd;
Mix[2][1][re] = -c12*s23-s12*c23*s13*cd;
Mix[2][1][im] = -s12*c23*s13*sd;
Mix[2][2][re] = c23*c13;
Mix[2][2][im] = 0.0;
}
else
{
Mix[0][0][re] = c12;
Mix[0][0][im] = 0.0;
Mix[0][1][re] = s12*c23;
Mix[0][1][im] = 0.0;
Mix[0][2][re] = s12*s23;
Mix[0][2][im] = 0.0;
Mix[1][0][re] = -s12*c13;
Mix[1][0][im] = 0.0;
Mix[1][1][re] = c12*c13*c23+s13*s23*cd;
Mix[1][1][im] = s13*s23*sd;
Mix[1][2][re] = c12*c13*s23-s13*c23*cd;
Mix[1][2][im] = -s13*c23*sd;
Mix[2][0][re] = -s12*s13;
Mix[2][0][im] = 0.0;
Mix[2][1][re] = c12*s13*c23-c13*s23*cd;
Mix[2][1][im] = -c13*s23*sd;
Mix[2][2][re] = c12*s13*s23+c13*c23*cd;
Mix[2][2][im] = c13*c23*sd;
}
}
__host__ void setmass(double dms21, double dms23, double dmVacVac[][3])
{
double delta=5.0e-9;
double mVac[3];
mVac[0] = 0.0;
mVac[1] = dms21;
mVac[2] = dms21+dms23;
/* Break any degeneracies */
if (dms21==0.0) mVac[0] -= delta;
if (dms23==0.0) mVac[2] += delta;
dmVacVac[0][0] = dmVacVac[1][1] = dmVacVac[2][2] = 0.0;
dmVacVac[0][1] = mVac[0]-mVac[1]; dmVacVac[1][0] = -dmVacVac[0][1];
dmVacVac[0][2] = mVac[0]-mVac[2]; dmVacVac[2][0] = -dmVacVac[0][2];
dmVacVac[1][2] = mVac[1]-mVac[2]; dmVacVac[2][1] = -dmVacVac[1][2];
}
/***********************************************************************
getM
Compute the matter-mass vector M, dM = M_i-M_j,
and dMimj. type<0 means anti-neutrinos, type>0 means "real" neutrinos
***********************************************************************/
__device__ void getM(double Enu, double rho,
double Mix[][3][2], double dmVacVac[][3], int antitype,
double dmMatMat[][3], double dmMatVac[][3])
{
int i, j, k;
double alpha, beta, gamma, fac=0.0, arg, tmp;
double alphaV, betaV, gammaV, argV, tmpV;
double theta0, theta1, theta2;
double theta0V, theta1V, theta2V;
double mMatU[3], mMatV[3], mMat[3];
double tworttwoGf = 1.52588e-4;
/* Equations (22) from Barger et al. */
/* Reverse the sign of the potential depending on neutrino type */
//if (matterFlavor == nue_type) {
/* If we're doing matter effects for electron neutrinos */
if (antitype<0) fac = tworttwoGf*Enu*rho; /* Anti-neutrinos */
else fac = -tworttwoGf*Enu*rho; /* Real-neutrinos */
//}
//else if (matterFlavor == sterile_type) {
/* If we're doing matter effects for sterile neutrinos */
//if (antitype<0) fac = -0.5*tworttwoGf*Enu*rho; /* Anti-neutrinos */
// else fac = 0.5*tworttwoGf*Enu*rho; /* Real-neutrinos */
// }
/* The strategy to sort out the three roots is to compute the vacuum
* mass the same way as the "matter" masses are computed then to sort
* the results according to the input vacuum masses
*/
alpha = fac + dmVacVac[0][1] + dmVacVac[0][2];
alphaV = dmVacVac[0][1] + dmVacVac[0][2];
#ifndef ZERO_CP
beta = dmVacVac[0][1]*dmVacVac[0][2] +
fac*(dmVacVac[0][1]*(1.0 - Mix[elec][1][re]*Mix[elec][1][re] -
Mix[elec][1][im]*Mix[elec][1][im]) +
dmVacVac[0][2]*(1.0 - Mix[elec][2][re]*Mix[elec][2][re] -
Mix[elec][2][im]*Mix[elec][2][im]));
betaV = dmVacVac[0][1]*dmVacVac[0][2];
#else
beta = dmVacVac[0][1]*dmVacVac[0][2] +
fac*(dmVacVac[0][1]*(1.0 - Mix[elec][1][re]*Mix[elec][1][re]) +
dmVacVac[0][2]*(1.0- Mix[elec][2][re]*Mix[elec][2][re]));
betaV = dmVacVac[0][1]*dmVacVac[0][2];
#endif
#ifndef ZERO_CP
gamma = fac*dmVacVac[0][1]*dmVacVac[0][2]*
(Mix[elec][0][re]*Mix[elec][0][re]+Mix[elec][0][im]*Mix[elec][0][im]);
gammaV = 0.0;
#else
gamma = fac*dmVacVac[0][1]*dmVacVac[0][2]*
(Mix[elec][0][re]*Mix[elec][0][re]);
gammaV = 0.0;
#endif
/* Compute the argument of the arc-cosine */
tmp = alpha*alpha-3.0*beta;
tmpV = alphaV*alphaV-3.0*betaV;
if (tmp<0.0) {
// fprintf(stderr, "getM: alpha^2-3*beta < 0 !\n");
tmp = 0.0;
}
/* Equation (21) */
arg = (2.0*alpha*alpha*alpha-9.0*alpha*beta+27.0*gamma)/
(2.0*sqrt(tmp*tmp*tmp));
if (fabs(arg)>1.0) arg = arg/fabs(arg);
argV = (2.0*alphaV*alphaV*alphaV-9.0*alphaV*betaV+27.0*gammaV)/
(2.0*sqrt(tmpV*tmpV*tmpV));
if (fabs(argV)>1.0) argV = argV/fabs(argV);
/* These are the three roots the paper refers to */
theta0 = acos(arg)/3.0;
theta1 = theta0-(2.0*M_PI/3.0);
theta2 = theta0+(2.0*M_PI/3.0);
theta0V = acos(argV)/3.0;
theta1V = theta0V-(2.0*M_PI/3.0);
theta2V = theta0V+(2.0*M_PI/3.0);
mMatU[0] = mMatU[1] = mMatU[2] = -(2.0/3.0)*sqrt(tmp);
mMatU[0] *= cos(theta0); mMatU[1] *= cos(theta1); mMatU[2] *= cos(theta2);
tmp = dmVacVac[0][0] - alpha/3.0;
mMatU[0] += tmp; mMatU[1] += tmp; mMatU[2] += tmp;
mMatV[0] = mMatV[1] = mMatV[2] = -(2.0/3.0)*sqrt(tmpV);
mMatV[0] *= cos(theta0V); mMatV[1] *= cos(theta1V); mMatV[2] *= cos(theta2V);
tmpV = dmVacVac[0][0] - alphaV/3.0;
mMatV[0] += tmpV; mMatV[1] += tmpV; mMatV[2] += tmpV;
/* Sort according to which reproduces the vacuum eigenstates */
for (i=0; i<3; i++) {
tmpV = fabs(dmVacVac[i][0]-mMatV[0]);
k = 0;
for (j=1; j<3; j++) {
tmp = fabs(dmVacVac[i][0]-mMatV[j]);
if (tmp<tmpV) {
k = j;
tmpV = tmp;
}
}
mMat[i] = mMatU[k];
}
for (i=0; i<3; i++) {
for (j=0; j<3; j++) {
dmMatMat[i][j] = mMat[i] - mMat[j];
dmMatVac[i][j] = mMat[i] - dmVacVac[j][0];
}
}
}
/***********************************************************************
getA
Calculate the transition amplitude matrix A (equation 10)
***********************************************************************/
__device__ void getA(double L, double E, double rho,
double Mix[][3][2], double dmMatVac[][3],
double dmMatMat[][3], int antitype, double A[3][3][2],
double phase_offset)
{
/*
DARN - looks like this is all junk...more debugging needed...
*/
//int n, m, i, j, k;
double /*fac=0.0,*/ arg, c, s;
// TCA ADDITION: set equal to 0!
double X[3][3][2] = {0.0};
double product[3][3][3][2] = {0.0};
/* (1/2)*(1/(h_bar*c)) in units of GeV/(eV^2-km) */
const double LoEfac = 2.534;
if ( phase_offset==0.0 )
{
get_product(L, E, rho, Mix, dmMatVac, dmMatMat, antitype, product);
}
/////////////// product is JUNK /////////////
for (int i=0; i<3; i++){
for (int j=0; j<3; j++) {
//printf(" product[%d][%d]: %f, %f\n",i,j,*product[i][j][0],*product[i][j][1]);
//printf(" A[%d][%d]: %f, %f\n",i,j,A[i][j][0],A[i][j][1]);
}
}
/* Make the sum with the exponential factor */
//cudaMemset(X, 0, 3*3*2*sizeof(double));
//memset(X, 0, 3*3*2*sizeof(double));
for (int k=0; k<3; k++)
{
arg = -LoEfac*dmMatVac[k][0]*L/E;
if ( k==2 ) arg += phase_offset ;
c = cos(arg);
s = sin(arg);
for (int i=0; i<3; i++)
{
for (int j=0; j<3; j++)
{
#ifndef ZERO_CP
X[i][j][re] += c*product[i][j][k][re] - s*product[i][j][k][im];
X[i][j][im] += c*product[i][j][k][im] + s*product[i][j][k][re];
#else
X[i][j][re] += c*product[i][j][k][re];
X[i][j][im] += s*product[i][j][k][re];
#endif
}
}
}
/* Compute the product with the mixing matrices */
for(int i=0; i < 3; i++)
for(int j = 0; j < 3; j++)
for(int k = 0; k < 2; k++)
A[i][j][k] = 0;
for (int n=0; n<3; n++) {
for (int m=0; m<3; m++) {
for (int i=0; i<3; i++) {
for (int j=0; j<3; j++) {
#ifndef ZERO_CP
A[n][m][re] +=
Mix[n][i][re]*X[i][j][re]*Mix[m][j][re] +
Mix[n][i][re]*X[i][j][im]*Mix[m][j][im] +
Mix[n][i][im]*X[i][j][re]*Mix[m][j][im] -
Mix[n][i][im]*X[i][j][im]*Mix[m][j][re];
//printf("\nregret %f %f %f",Mix[n][i][re], X[i][j][im], Mix[m][j][im]);
A[n][m][im] +=
Mix[n][i][im]*X[i][j][im]*Mix[m][j][im] +
Mix[n][i][im]*X[i][j][re]*Mix[m][j][re] +
Mix[n][i][re]*X[i][j][im]*Mix[m][j][re] -
Mix[n][i][re]*X[i][j][re]*Mix[m][j][im];
#else
A[n][m][re] +=
Mix[n][i][re]*X[i][j][re]*Mix[m][j][re];
A[n][m][im] +=
Mix[n][i][re]*X[i][j][im]*Mix[m][j][re];
#endif
//printf("\n %i %i %i A %f", n, m, re, A[n][m][re]);
}
}
}
}
//printf("(getA) Aout: %f\n",A[0][0][0]);
}
__device__ void get_product(double L, double E, double rho,double Mix[][3][2],
double dmMatVac[][3], double dmMatMat[][3],
int antitype,
double product[][3][3][2])
{
double fac=0.0;
double twoEHmM[3][3][3][2];
double tworttwoGf = 1.52588e-4;
/* (1/2)*(1/(h_bar*c)) in units of GeV/(eV^2-km) */
/* Reverse the sign of the potential depending on neutrino type */
//if (matterFlavor == nue_type) {
/* If we're doing matter effects for electron neutrinos */
if (antitype<0) fac = tworttwoGf*E*rho; /* Anti-neutrinos */
else fac = -tworttwoGf*E*rho; /* Real-neutrinos */
// }
/*
else if (matterFlavor == sterile_type) {
// If we're doing matter effects for sterile neutrinos
if (antitype<0) fac = -0.5*tworttwoGf*E*rho; // Anti-neutrinos
else fac = 0.5*tworttwoGf*E*rho; // Real-neutrinos
} */
/* Calculate the matrix 2EH-M_j */
for (int n=0; n<3; n++) {
for (int m=0; m<3; m++) {
#ifndef ZERO_CP
twoEHmM[n][m][0][re] =
-fac*(Mix[0][n][re]*Mix[0][m][re]+Mix[0][n][im]*Mix[0][m][im]);
twoEHmM[n][m][0][im] =
-fac*(Mix[0][n][re]*Mix[0][m][im]-Mix[0][n][im]*Mix[0][m][re]);
twoEHmM[n][m][1][re] = twoEHmM[n][m][2][re] = twoEHmM[n][m][0][re];
twoEHmM[n][m][1][im] = twoEHmM[n][m][2][im] = twoEHmM[n][m][0][im];
#else
twoEHmM[n][m][0][re] =
-fac*(Mix[0][n][re]*Mix[0][m][re]);
twoEHmM[n][m][0][im] = 0 ;
twoEHmM[n][m][1][re] = twoEHmM[n][m][2][re] = twoEHmM[n][m][0][re];
twoEHmM[n][m][1][im] = twoEHmM[n][m][2][im] = twoEHmM[n][m][0][im];
#endif
if (n==m) for (int j=0; j<3; j++)
twoEHmM[n][m][j][re] -= dmMatVac[j][n];
}
}
/* Calculate the product in eq.(10) of twoEHmM for j!=k */
for (int i=0; i<3; i++) {
for (int j=0; j<3; j++) {
for (int k=0; k<3; k++) {
#ifndef ZERO_CP
product[i][j][0][re] +=
twoEHmM[i][k][1][re]*twoEHmM[k][j][2][re] -
twoEHmM[i][k][1][im]*twoEHmM[k][j][2][im];
product[i][j][0][im] +=
twoEHmM[i][k][1][re]*twoEHmM[k][j][2][im] +
twoEHmM[i][k][1][im]*twoEHmM[k][j][2][re];
product[i][j][1][re] +=
twoEHmM[i][k][2][re]*twoEHmM[k][j][0][re] -
twoEHmM[i][k][2][im]*twoEHmM[k][j][0][im];
product[i][j][1][im] +=
twoEHmM[i][k][2][re]*twoEHmM[k][j][0][im] +
twoEHmM[i][k][2][im]*twoEHmM[k][j][0][re];
product[i][j][2][re] +=
twoEHmM[i][k][0][re]*twoEHmM[k][j][1][re] -
twoEHmM[i][k][0][im]*twoEHmM[k][j][1][im];
product[i][j][2][im] +=
twoEHmM[i][k][0][re]*twoEHmM[k][j][1][im] +
twoEHmM[i][k][0][im]*twoEHmM[k][j][1][re];
#else
product[i][j][0][re] +=
twoEHmM[i][k][1][re]*twoEHmM[k][j][2][re];
product[i][j][1][re] +=
twoEHmM[i][k][2][re]*twoEHmM[k][j][0][re];
product[i][j][2][re] +=
twoEHmM[i][k][0][re]*twoEHmM[k][j][1][re];
#endif
}
#ifndef ZERO_CP
product[i][j][0][re] /= (dmMatMat[0][1]*dmMatMat[0][2]);
product[i][j][0][im] /= (dmMatMat[0][1]*dmMatMat[0][2]);
product[i][j][1][re] /= (dmMatMat[1][2]*dmMatMat[1][0]);
product[i][j][1][im] /= (dmMatMat[1][2]*dmMatMat[1][0]);
product[i][j][2][re] /= (dmMatMat[2][0]*dmMatMat[2][1]);
product[i][j][2][im] /= (dmMatMat[2][0]*dmMatMat[2][1]);
#else
product[i][j][0][re] /= (dmMatMat[0][1]*dmMatMat[0][2]);
product[i][j][1][re] /= (dmMatMat[1][2]*dmMatMat[1][0]);
product[i][j][2][re] /= (dmMatMat[2][0]*dmMatMat[2][1]);
#endif
}
}
}
|
4a4dce5ff3d5e180dd306dc1468362af8cce93b4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
// CUDA runtime
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include "dense_help_func.hpp"
#include "quantization_8bit.cu"
int main(int argc, char** argv) {
if (argc != 4) {
printf("usage: ./main [M] [K] [N]\n");
exit(0);
}
size_t M = atoi(argv[1]);
size_t K = atoi(argv[2]);
size_t N = atoi(argv[3]);
// for uint8
size_t bytes = sizeof(uint8_t) * M * K; // reused for A, B, and C below, which is only safe when M == K == N
uint8_t* h_A = (uint8_t*)malloc(bytes);
uint8_t* h_B = (uint8_t*)malloc(bytes);
uint8_t* h_C = (uint8_t*)malloc(bytes);
uint8_t* d_A;
uint8_t* d_B;
uint8_t* d_C;
checkCudaErrors(hipMalloc(&d_A, bytes));
checkCudaErrors(hipMalloc(&d_B, bytes));
checkCudaErrors(hipMalloc(&d_C, bytes));
// for float
size_t fbytes = sizeof(float) * M * K;
float* fh_A = (float*)malloc(fbytes);
float* fh_B = (float*)malloc(fbytes);
float* fh_C = (float*)malloc(fbytes);
float* fd_A;
float* fd_B;
float* fd_C;
checkCudaErrors(hipMalloc(&fd_A, fbytes));
checkCudaErrors(hipMalloc(&fd_B, fbytes));
checkCudaErrors(hipMalloc(&fd_C, fbytes));
double msecPerMatrixMul[2] = {0, 0};
double gigaFlops[2] = {0, 0};
double flopsPerMatrixMul = 2.0 * M * N * K;
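// 2*M*N*K counts one multiply and one add per inner-product term;
// e.g. M = N = K = 1024 gives about 2.15e9 operations per GEMM call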
const int BLOCK_SIZE_M = 32;
const int BLOCK_SIZE_K = 32;
const int BLOCK_SIZE_N = 32;
const int THREAD_SIZE_X = 4;
const int THREAD_SIZE_Y = 4;
const bool ENABLE_DOUBLE_BUFFER = false;
const int BIT_WIDTH = 8;
int k_block = K / BLOCK_SIZE_K;
int stride = 2;
// A
for( int i = 0; i < M * K; i++ ) {
int row = (i / K);
int col = (i % K);
int row_block = row / BLOCK_SIZE_M;
int col_block = col / BLOCK_SIZE_K;
if ((row_block * k_block + col_block) % stride == 0) {
h_A[i] = 1;
fh_A[i] = 1;
}
else {
h_A[i] = 0;
fh_A[i] = 0;
}
}
// for( int i = 0; i < M; i++ ) {
// for( int j = 0; j < K; j++ ) {
// printf("%d ", h_A[i * K + j]);
// }
// printf("\n");
// }
// B
for( int i = 0; i < K * N; i++ ) {
if ( i >= K * N / 2) {
h_B[i] = 2;
fh_B[i] = 2;
}
else {
h_B[i] = 0;
fh_B[i] = 0;
}
}
// printf("\n");
// for( int i = 0; i < M; i++ ) {
// for( int j = 0; j < K; j++ ) {
// printf("%d ", h_B[i * K + j]);
// }
// printf("\n");
// }
checkCudaErrors(hipMemcpy( d_A, h_A, bytes, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy( d_B, h_B, bytes, hipMemcpyHostToDevice));
hipEvent_t start, stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
float msecTotal = 0;
int nIter = 100;
checkCudaErrors(hipMemcpy( d_C, h_C, bytes, hipMemcpyHostToDevice));
checkCudaErrors(hipEventRecord(start));
for (int run = 0 ; run < nIter; run ++ ) {
dim3 dimBlock(BLOCK_SIZE_N / THREAD_SIZE_X, BLOCK_SIZE_M / THREAD_SIZE_Y);
dim3 dimGrid(N / BLOCK_SIZE_N, M / BLOCK_SIZE_M);
hipLaunchKernelGGL(( MatrixMulCUDAQuantize8bit<BLOCK_SIZE_M, BLOCK_SIZE_K, BLOCK_SIZE_N, THREAD_SIZE_Y, THREAD_SIZE_X, BIT_WIDTH, ENABLE_DOUBLE_BUFFER>)
, dim3(dimGrid), dim3(dimBlock) , 0, 0, (uint32_t*)d_A, (uint32_t*)d_B, (uint32_t*)d_C, K, N);
}
checkCudaErrors(hipEventRecord(stop));
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventElapsedTime(&msecTotal, start, stop));
checkCudaErrors(hipMemcpy( h_C, d_C, bytes, hipMemcpyDeviceToHost));
// printf("\n");
// for( int i = 0; i < M; i++ ) {
// for( int j = 0; j < K; j++ ) {
// printf("%d ", h_C[i * K + j]);
// }
// printf("\n");
// }
msecPerMatrixMul[0] = msecTotal / nIter;
gigaFlops[0] = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul[0] / 1000.0f);
printf( "My gemm Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops,\n",
gigaFlops[0],
msecPerMatrixMul[0],
flopsPerMatrixMul);
// cublas
hipblasHandle_t blas_handle;
checkCuBlasErrors ( hipblasCreate(&blas_handle) );
float alpha = 1.0;
float beta = 0;
checkCudaErrors(hipMemcpy( fd_A, fh_A, fbytes, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy( fd_B, fh_B, fbytes, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy( fd_C, fh_C, fbytes, hipMemcpyHostToDevice));
checkCudaErrors(hipEventRecord(start));
for (int run = 0 ; run < nIter; run ++ ) {
checkCuBlasErrors (
hipblasSgemm (blas_handle, HIPBLAS_OP_T, HIPBLAS_OP_T,
M, N, K, &alpha,
fd_A, M, fd_B, K, &beta, fd_C, K
)
);
}
checkCudaErrors(hipEventRecord(stop));
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventElapsedTime(&msecTotal, start, stop));
checkCudaErrors(hipMemcpy( fh_C, fd_C, fbytes, hipMemcpyDeviceToHost));
// printf("\n");
// for( int i = 0; i < M; i++ ) {
// for( int j = 0; j < K; j++ ) {
// printf("%0.f ", fh_C[j * M + i]);
// }
// printf("\n");
// }
msecPerMatrixMul[1] = msecTotal / nIter;
gigaFlops[1] = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul[1] / 1000.0f);
printf( "CuBlas Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops,\n",
gigaFlops[1],
msecPerMatrixMul[1],
flopsPerMatrixMul);
hipblasDestroy(blas_handle);
double eps = 1.e-6; // machine zero
bool correct = true;
for (int i = 0; i < M * N; i++) {
// fh_C is transposed (indexed column-major below)
int row = i / N;
int col = i % N;
double abs_err = fabs(h_C[i] - fh_C[col * M + row]);
double dot_length = M;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err / abs_val / dot_length;
if (rel_err > eps) {
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",
i, (float)h_C[i], fh_C[col * M + row], eps);
correct = false;
break;
}
}
printf("%s\n", correct ? "Result= PASS" : "Result= FAIL");
printf("ratio= %f\n", gigaFlops[0] / gigaFlops[1]);
// Free Memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
free(h_A);
free(h_B);
free(h_C);
hipFree(fd_A);
hipFree(fd_B);
hipFree(fd_C);
free(fh_A);
free(fh_B);
free(fh_C);
} | 4a4dce5ff3d5e180dd306dc1468362af8cce93b4.cu | #include <stdio.h>
#include <stdlib.h>
// CUDA runtime
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include "dense_help_func.hpp"
#include "quantization_8bit.cu"
int main(int argc, char** argv) {
if (argc != 4) {
printf("usage: ./main [M] [K] [N]\n");
exit(0);
}
size_t M = atoi(argv[1]);
size_t K = atoi(argv[2]);
size_t N = atoi(argv[3]);
// for uint8
size_t bytes = sizeof(uint8_t) * M * K;
uint8_t* h_A = (uint8_t*)malloc(bytes);
uint8_t* h_B = (uint8_t*)malloc(bytes);
uint8_t* h_C = (uint8_t*)malloc(bytes);
uint8_t* d_A;
uint8_t* d_B;
uint8_t* d_C;
checkCudaErrors(cudaMalloc(&d_A, bytes));
checkCudaErrors(cudaMalloc(&d_B, bytes));
checkCudaErrors(cudaMalloc(&d_C, bytes));
// for float
size_t fbytes = sizeof(float) * M * K;
float* fh_A = (float*)malloc(fbytes);
float* fh_B = (float*)malloc(fbytes);
float* fh_C = (float*)malloc(fbytes);
float* fd_A;
float* fd_B;
float* fd_C;
checkCudaErrors(cudaMalloc(&fd_A, fbytes));
checkCudaErrors(cudaMalloc(&fd_B, fbytes));
checkCudaErrors(cudaMalloc(&fd_C, fbytes));
double msecPerMatrixMul[2] = {0, 0};
double gigaFlops[2] = {0, 0};
double flopsPerMatrixMul = 2.0 * M * N * K;
const int BLOCK_SIZE_M = 32;
const int BLOCK_SIZE_K = 32;
const int BLOCK_SIZE_N = 32;
const int THREAD_SIZE_X = 4;
const int THREAD_SIZE_Y = 4;
const bool ENABLE_DOUBLE_BUFFER = false;
const int BIT_WIDTH = 8;
int k_block = K / BLOCK_SIZE_K;
int stride = 2;
// Generate the data for A
for( int i = 0; i < M * K; i++ ) {
int row = (i / K);
int col = (i % K);
int row_block = row / BLOCK_SIZE_M;
int col_block = col / BLOCK_SIZE_K;
if ((row_block * k_block + col_block) % stride == 0) {
h_A[i] = 1;
fh_A[i] = 1;
}
else {
h_A[i] = 0;
fh_A[i] = 0;
}
}
// for( int i = 0; i < M; i++ ) {
// for( int j = 0; j < K; j++ ) {
// printf("%d ", h_A[i * K + j]);
// }
// printf("\n");
// }
// Generate the data for B
for( int i = 0; i < K * N; i++ ) {
if ( i >= K * N / 2) {
h_B[i] = 2;
fh_B[i] = 2;
}
else {
h_B[i] = 0;
fh_B[i] = 0;
}
}
// printf("\n");
// for( int i = 0; i < M; i++ ) {
// for( int j = 0; j < K; j++ ) {
// printf("%d ", h_B[i * K + j]);
// }
// printf("\n");
// }
checkCudaErrors(cudaMemcpy( d_A, h_A, bytes, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy( d_B, h_B, bytes, cudaMemcpyHostToDevice));
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
float msecTotal = 0;
int nIter = 100;
checkCudaErrors(cudaMemcpy( d_C, h_C, bytes, cudaMemcpyHostToDevice));
checkCudaErrors(cudaEventRecord(start));
for (int run = 0 ; run < nIter; run ++ ) {
dim3 dimBlock(BLOCK_SIZE_N / THREAD_SIZE_X, BLOCK_SIZE_M / THREAD_SIZE_Y);
dim3 dimGrid(N / BLOCK_SIZE_N, M / BLOCK_SIZE_M);
MatrixMulCUDAQuantize8bit<BLOCK_SIZE_M, BLOCK_SIZE_K, BLOCK_SIZE_N, THREAD_SIZE_Y, THREAD_SIZE_X, BIT_WIDTH, ENABLE_DOUBLE_BUFFER>
<<< dimGrid, dimBlock >>>((uint32_t*)d_A, (uint32_t*)d_B, (uint32_t*)d_C, K, N);
}
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));
checkCudaErrors(cudaMemcpy( h_C, d_C, bytes, cudaMemcpyDeviceToHost));
// printf("\n");
// for( int i = 0; i < M; i++ ) {
// for( int j = 0; j < K; j++ ) {
// printf("%d ", h_C[i * K + j]);
// }
// printf("\n");
// }
msecPerMatrixMul[0] = msecTotal / nIter;
gigaFlops[0] = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul[0] / 1000.0f);
printf( "My gemm Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops,\n",
gigaFlops[0],
msecPerMatrixMul[0],
flopsPerMatrixMul);
// cublas
cublasHandle_t blas_handle;
checkCuBlasErrors ( cublasCreate(&blas_handle) );
float alpha = 1.0;
float beta = 0;
checkCudaErrors(cudaMemcpy( fd_A, fh_A, fbytes, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy( fd_B, fh_B, fbytes, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy( fd_C, fh_C, fbytes, cudaMemcpyHostToDevice));
checkCudaErrors(cudaEventRecord(start));
for (int run = 0 ; run < nIter; run ++ ) {
checkCuBlasErrors (
cublasSgemm (blas_handle, CUBLAS_OP_T, CUBLAS_OP_T,
M, N, K, &alpha,
fd_A, M, fd_B, K, &beta, fd_C, K
)
);
}
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));
checkCudaErrors(cudaMemcpy( fh_C, fd_C, fbytes, cudaMemcpyDeviceToHost));
// printf("\n");
// for( int i = 0; i < M; i++ ) {
// for( int j = 0; j < K; j++ ) {
// printf("%0.f ", fh_C[j * M + i]);
// }
// printf("\n");
// }
msecPerMatrixMul[1] = msecTotal / nIter;
gigaFlops[1] = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul[1] / 1000.0f);
printf( "CuBlas Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops,\n",
gigaFlops[1],
msecPerMatrixMul[1],
flopsPerMatrixMul);
cublasDestroy(blas_handle);
double eps = 1.e-6; // machine zero
bool correct = true;
for (int i = 0; i < M * N; i++) {
// fh_C is transposed
int row = i / N;
int col = i % N;
double abs_err = fabs(h_C[i] - fh_C[col * M + row]);
double dot_length = M;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err / abs_val / dot_length;
if (rel_err > eps) {
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",
i, (float)h_C[i], fh_C[col * M + row], eps);
correct = false;
break;
}
}
printf("%s\n", correct ? "Result= PASS" : "Result= FAIL");
printf("ratio= %f\n", gigaFlops[0] / gigaFlops[1]);
// Free Memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
free(h_A);
free(h_B);
free(h_C);
cudaFree(fd_A);
cudaFree(fd_B);
cudaFree(fd_C);
free(fh_A);
free(fh_B);
free(fh_C);
} |
eb1ef46380def8a535a6854e7848fab49d58fbf0.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cudaDclamp_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
unsigned int size = 1;
double minVal = 1;
double maxVal = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((cudaDclamp_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, x, size, minVal, maxVal);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((cudaDclamp_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, x, size, minVal, maxVal);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((cudaDclamp_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, x, size, minVal, maxVal);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | eb1ef46380def8a535a6854e7848fab49d58fbf0.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cudaDclamp_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
unsigned int size = 1;
double minVal = 1;
double maxVal = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
cudaDclamp_kernel<<<gridBlock,threadBlock>>>(x,size,minVal,maxVal);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
cudaDclamp_kernel<<<gridBlock,threadBlock>>>(x,size,minVal,maxVal);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
cudaDclamp_kernel<<<gridBlock,threadBlock>>>(x,size,minVal,maxVal);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
ca6d4861801c96bad751ddc06e5c235957f57501.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
int main()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
hipError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel << <1, size >> >(dev_c, dev_a, dev_b);
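// Note (illustrative, not part of the original sample): a single block caps this
// launch at 1024 threads on current GPUs, so larger arrays would need a grid plus
// a bounds check inside the kernel, e.g.
// addKernel << <(size + 255) / 256, 256 >> >(dev_c, dev_a, dev_b);
// with an `if (i < size)` guard around the store.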
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
return cudaStatus;
}
| ca6d4861801c96bad751ddc06e5c235957f57501.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
int main()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel << <1, size >> >(dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
|
ce36887525e4321e942499252e721344d90a2e77.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "common/kernel.h"
#include <algorithm>
#include <array>
#include <assert.h>
#include <cfloat>
#include <cstdio>
#include <math.h>
#include <stdio.h>
// This macro is to control shared memory usage. If set to 1, kernel loads the whole feature map
// into shared memory for reuse; If set to 0, kernel loads data from global memory directly.
// Roi pooling performance is data dependent. You can test which value is better to your data.
// If all bboxes are very small, 0 is recommended, otherwise, shared memory will load many unused
// data; If bboxes have many overlaps, 1 is recommended to avoid duplicate loads.
// 1 requires larger shared memory size. It may fail if it is larger than CUDA allowed per-block
// shared memory size upper bound. Then you have to use 0.
#define ROIPOOLING_FEATURE_MAP_USE_SHMEM 1
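// Rough shared-memory budget for option 1 (an illustrative sketch; the numbers are
// assumptions, not taken from any particular network): with an H = W = 64 fp32
// feature map and 300 ROIs per image,
// feature tile : 64 * 64 * sizeof(float) = 16384 B
// ROI coords : 300 * 4 * sizeof(int) = 4800 B
// for roughly 21 KiB per block, well under the default 48 KiB limit. A 128 x 128
// fp32 map (64 KiB) would instead need option 0 or the explicit opt-in performed
// in the launcher below.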
template <typename T>
__device__ T getMax();
template <>
__device__ __forceinline__ int8_t getMax<int8_t>()
{
return INT8_MAX;
}
template <>
__device__ __forceinline__ float getMax<float>()
{
return FLT_MAX;
}
// ROI POOLING FORWARD KERNEL
template <typename DATA_T, typename ROI_T, bool INFER_ONLY, bool FM_IN_SMEM>
__global__ void ROIPoolingForwardKernelAligned(int32_t ROICount, const ROI_T* rois,
int32_t N, // feature map size
int32_t C, // feature map size
int32_t H, // feature map size
int32_t W, // feature map size
const DATA_T* featureMap, const int32_t poolingH, const int32_t poolingW, const float spatialScale, DATA_T* top,
int32_t* maxIds, int32_t fmapStep)
{
extern __shared__ float smem[];
DATA_T* feature_shr = (DATA_T*) &smem[0];
int* rois_shr = nullptr;
if (FM_IN_SMEM)
{
rois_shr = (int*) &feature_shr[H * W];
}
else
{
rois_shr = (int*) &feature_shr[0];
feature_shr = nullptr;
}
const int batch = blockIdx.x / C;
const int channel = blockIdx.x % C;
// load ROIs to shared memory
for (int j = threadIdx.x; j < ROICount; j += blockDim.x)
{
int offset = j << 2;
float4 roi = reinterpret_cast<float4*>(const_cast<float*>(rois))[batch * ROICount + j];
// spatialScale = 1.0 / featureStride
// Convert the coordinates to feature map scale
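// e.g. (illustrative) a feature stride of 16 gives spatialScale = 1/16, so an ROI
// corner at x = 320 image pixels lands at feature-map column round(320 / 16) = 20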
rois_shr[offset] = round(roi.x * spatialScale); //roi_start_w
rois_shr[offset + 1] = round(roi.y * spatialScale); //roi_start_h
rois_shr[offset + 2] = round(roi.z * spatialScale) - round(roi.x * spatialScale); //roi_length_w
rois_shr[offset + 3] = round(roi.w * spatialScale) - round(roi.y * spatialScale); // roi_length_h
}
// NC/xHW
int fmapOffset = blockIdx.x / fmapStep * H * W * fmapStep + blockIdx.x % fmapStep;
// Assumes #CTAs is just enough to cover all channels of all blocks
const DATA_T* bottom_data_offset = featureMap + fmapOffset;
if (FM_IN_SMEM)
{
// load the current channel to the shared memory
for (int j = threadIdx.x; j < H * W; j += blockDim.x)
{
feature_shr[j] = bottom_data_offset[j * fmapStep];
}
}
__syncthreads();
for (int j = threadIdx.x; j < ROICount; j += blockDim.x)
{
const int offset = j << 2;
// Force malformed ROIs to be 1x1
int roi_start_w = rois_shr[offset];
int roi_start_h = rois_shr[offset + 1];
int roi_width = max(rois_shr[offset + 2] + 1, 1);
int roi_height = max(rois_shr[offset + 3] + 1, 1);
float bin_size_h = static_cast<float>(roi_height) / static_cast<float>(poolingH);
float bin_size_w = static_cast<float>(roi_width) / static_cast<float>(poolingW);
for (int ph = 0; ph < poolingH; ++ph)
{
for (int pw = 0; pw < poolingW; ++pw)
{
int hstart = static_cast<int>(floor(static_cast<float>(ph) * bin_size_h));
int wstart = static_cast<int>(floor(static_cast<float>(pw) * bin_size_w));
int hend = static_cast<int>(ceil(static_cast<float>(ph + 1) * bin_size_h));
int wend = static_cast<int>(ceil(static_cast<float>(pw + 1) * bin_size_w));
// Add roi offsets and clip to input boundaries
// In fact, clipping should be done in the RPN, but just in case...
hstart = min(max(hstart + roi_start_h, 0), H);
hend = min(max(hend + roi_start_h, 0), H);
wstart = min(max(wstart + roi_start_w, 0), W);
wend = min(max(wend + roi_start_w, 0), W);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
DATA_T maxval = is_empty ? 0 : -getMax<DATA_T>();
int maxId = -1;
DATA_T data = 0;
for (int h = hstart; h < hend; ++h)
{
for (int w = wstart; w < wend; ++w)
{
int index = h * W + w;
if (FM_IN_SMEM)
{
data = feature_shr[index];
}
else
{
data = bottom_data_offset[index * fmapStep];
}
if (data > maxval)
{
maxval = data;
maxId = index;
}
}
}
top[(((batch * ROICount + j) * C + channel) * poolingH + ph) * poolingW + pw] = maxval;
if (!INFER_ONLY)
{
maxIds[(((batch * ROICount + j) * C + channel) * poolingH + ph) * poolingW + pw] = maxId;
}
} //for:pw
} //for:ph
} // for:j
}
template <typename DATA_T, DLayout_t DATA_L, typename ROI_T, bool INFER_ONLY>
pluginStatus_t ROIPoolingForwardKernelAlignedLauncher(hipStream_t stream,
const int R, // TOTAL number of rois -> ~nmsMaxOut * N
const int N, // Batch size
const int C, // Channels
const int H, // Input feature map H
const int W, // Input feature map W
const int poolingH, // Output feature map H
const int poolingW, // Output feature map W
const float spatialScale, const void* rois, const void* featureMap, void* top, int* maxIds, size_t deviceSmemSize)
{
size_t roiShmemSize = (R / N) * 4 * sizeof(ROI_T);
#if ROIPOOLING_FEATURE_MAP_USE_SHMEM
size_t shmemSize = H * W * sizeof(DATA_T) + roiShmemSize;
const bool fmap_in_shmem = true;
#else
size_t shmemSize = roiShmemSize;
const bool fmap_in_shmem = false;
#endif
if (shmemSize > deviceSmemSize)
{
return STATUS_BAD_PARAM;
}
// in the aligned version of ROI Pooling R should always be a multiple of N
PLUGIN_ASSERT(R % N == 0);
// NC/xHW
int32_t fmapStep = 1;
switch(DATA_L)
{
case NCHW: fmapStep = 1; break;
case NC4HW:
fmapStep = 4;
PLUGIN_ASSERT((N * C) % 4 == 0);
break;
case NC32HW:
fmapStep = 32;
PLUGIN_ASSERT((N * C) % 32 == 0);
break;
default: PLUGIN_ASSERT(false);
}
if (shmemSize > 48 * 1024)
{
PLUGIN_CHECK(hipFuncSetAttribute(&ROIPoolingForwardKernelAligned<DATA_T, ROI_T, INFER_ONLY, true>,
hipFuncAttributeMaxDynamicSharedMemorySize, shmemSize));
}
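// (Background note, not from the original: on NVIDIA hardware a kernel may use at
// most 48 KiB of dynamic shared memory by default; the hipFuncSetAttribute opt-in
// above is what permits larger allocations, and the request is still rejected
// earlier in this launcher when shmemSize exceeds the deviceSmemSize passed in.)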
hipLaunchKernelGGL(( ROIPoolingForwardKernelAligned<DATA_T, ROI_T, INFER_ONLY, fmap_in_shmem>), dim3(N * C), dim3(256), shmemSize, stream, R / N,
(const ROI_T*) rois,
N, // feature map size
C, // feature map size
H, // feature map size
W, // feature map size
(const DATA_T*) featureMap, poolingH, poolingW, spatialScale, (DATA_T*) top, maxIds, fmapStep);
CSC(hipGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
// ROI POOLING LAUNCH CONFIG
typedef pluginStatus_t (*roiFwd)(hipStream_t,
const int, //R, // TOTAL number of rois -> ~nmsMaxOut * N
const int, //N, // Batch size
const int, //C, // Channels
const int, //H, // Input feature map H
const int, //W, // Input feature map W
const int, //poolingH, // Output feature map H
const int, //poolingW, // Output feature map W
const float, //spatialScale,
const void*, //rois,
const void*, //featureMap,
void*, //top
int*, //maxIds
size_t); //device shmem size
// struct
struct roiFwdLaunchConfig
{
DataType t_rois;
DataType t_featureMap;
DLayout_t l_featureMap;
DataType t_top;
DLayout_t l_top;
bool inferOnly;
roiFwd function;
roiFwdLaunchConfig(DataType t_rois,
DataType t_featureMap,
DLayout_t l_featureMap,
DataType t_top,
DLayout_t l_top,
bool inferOnly)
: t_rois(t_rois)
, t_featureMap(t_featureMap)
, l_featureMap(l_featureMap)
, t_top(t_top)
, l_top(l_top)
, inferOnly(inferOnly)
{
}
roiFwdLaunchConfig(DataType t_rois,
DataType t_featureMap,
DLayout_t l_featureMap,
DataType t_top,
DLayout_t l_top,
bool inferOnly,
roiFwd function)
: t_rois(t_rois)
, t_featureMap(t_featureMap)
, l_featureMap(l_featureMap)
, t_top(t_top)
, l_top(l_top)
, inferOnly(inferOnly)
, function(function)
{
}
bool operator==(const roiFwdLaunchConfig& other)
{
return (t_rois == other.t_rois)
&& (t_featureMap == other.t_featureMap)
&& (l_featureMap == other.l_featureMap)
&& (t_top == other.t_top)
&& (l_top == other.l_top)
&& (inferOnly == other.inferOnly);
}
};
#define FLOAT32 nvinfer1::DataType::kFLOAT
#define INT8 nvinfer1::DataType::kINT8
static std::array<roiFwdLaunchConfig, 6> roiFwdLCOptions = {
roiFwdLaunchConfig(FLOAT32, FLOAT32, NCHW, FLOAT32, NCHW, true, ROIPoolingForwardKernelAlignedLauncher<float, NCHW, float, true>),
roiFwdLaunchConfig(FLOAT32, FLOAT32, NCHW, FLOAT32, NCHW, false, ROIPoolingForwardKernelAlignedLauncher<float, NCHW, float, false>),
roiFwdLaunchConfig(FLOAT32, INT8, NCHW, INT8, NCHW, true, ROIPoolingForwardKernelAlignedLauncher<int8_t, NCHW, float, true>),
roiFwdLaunchConfig(FLOAT32, INT8, NC4HW, INT8, NCHW, true, ROIPoolingForwardKernelAlignedLauncher<int8_t, NC4HW, float, true>),
roiFwdLaunchConfig(FLOAT32, INT8, NC32HW, INT8, NCHW, true, ROIPoolingForwardKernelAlignedLauncher<int8_t, NC32HW, float, true>),
roiFwdLaunchConfig(FLOAT32, FLOAT32, NC4HW, FLOAT32, NCHW, true, ROIPoolingForwardKernelAlignedLauncher<float, NC4HW, float, true>)};
// ROI INFERENCE
pluginStatus_t roiInference(hipStream_t stream,
const int R, // TOTAL number of rois -> ~nmsMaxOut * N
const int N, // Batch size
const int C, // Channels
const int H, // Input feature map H
const int W, // Input feature map W
const int poolingH, // Output feature map H
const int poolingW, // Output feature map W
const float spatialScale,
const nvinfer1::DataType t_rois,
const void* rois,
const nvinfer1::DataType t_featureMap,
const DLayout_t l_featureMap,
const void* featureMap,
const nvinfer1::DataType t_top,
const DLayout_t l_top,
void* top,
size_t deviceSmemSize)
{
if (featureMap == NULL || rois == NULL || top == NULL)
{
return STATUS_BAD_PARAM;
}
DEBUG_PRINTF("&&&& ROIS %u\n", hash(rois, R * 4 * sizeof(float)));
DEBUG_PRINTF("&&&& FMAP %u\n", hash(featureMap, N * C * H * W * sizeof(float)));
roiFwdLaunchConfig rflc = roiFwdLaunchConfig(t_rois, t_featureMap, l_featureMap, t_top, l_top, true);
ASSERT_PARAM(R > 0);
for (unsigned i = 0; i < roiFwdLCOptions.size(); i++)
{
if (rflc == roiFwdLCOptions[i])
{
DEBUG_PRINTF("$$$$ ROI KERNEL %d\n", i);
return roiFwdLCOptions[i].function(stream,
R, N, C, H, W, poolingH, poolingW,
spatialScale, rois, featureMap, top, NULL, deviceSmemSize);
}
}
return STATUS_BAD_PARAM;
}
| ce36887525e4321e942499252e721344d90a2e77.cu | /*
* SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "common/kernel.h"
#include <algorithm>
#include <array>
#include <assert.h>
#include <cfloat>
#include <cstdio>
#include <math.h>
#include <stdio.h>
// This macro is to control shared memory usage. If set to 1, kernel loads the whole feature map
// into shared memory for reuse; If set to 0, kernel loads data from global memory directly.
// Roi pooling performance is data dependent. You can test which value is better to your data.
// If all bboxes are very small, 0 is recommended, otherwise, shared memory will load many unused
// data; If bboxes have many overlaps, 1 is recommended to avoid duplicate loads.
// 1 requires larger shared memory size. It may fail if it is larger than CUDA allowed per-block
// shared memory size upper bound. Then you have to use 0.
#define ROIPOOLING_FEATURE_MAP_USE_SHMEM 1
template <typename T>
__device__ T getMax();
template <>
__device__ __forceinline__ int8_t getMax<int8_t>()
{
return INT8_MAX;
}
template <>
__device__ __forceinline__ float getMax<float>()
{
return FLT_MAX;
}
// ROI POOLING FORWARD KERNEL
template <typename DATA_T, typename ROI_T, bool INFER_ONLY, bool FM_IN_SMEM>
__global__ void ROIPoolingForwardKernelAligned(int32_t ROICount, const ROI_T* rois,
int32_t N, // feature map size
int32_t C, // feature map size
int32_t H, // feature map size
int32_t W, // feature map size
const DATA_T* featureMap, const int32_t poolingH, const int32_t poolingW, const float spatialScale, DATA_T* top,
int32_t* maxIds, int32_t fmapStep)
{
extern __shared__ float smem[];
DATA_T* feature_shr = (DATA_T*) &smem[0];
int* rois_shr = nullptr;
if (FM_IN_SMEM)
{
rois_shr = (int*) &feature_shr[H * W];
}
else
{
rois_shr = (int*) &feature_shr[0];
feature_shr = nullptr;
}
const int batch = blockIdx.x / C;
const int channel = blockIdx.x % C;
// load ROIs to shared memory
for (int j = threadIdx.x; j < ROICount; j += blockDim.x)
{
int offset = j << 2;
float4 roi = reinterpret_cast<float4*>(const_cast<float*>(rois))[batch * ROICount + j];
// spatialScale = 1.0 / featureStride
// Convert the coordinates to feature map scale
rois_shr[offset] = round(roi.x * spatialScale); //roi_start_w
rois_shr[offset + 1] = round(roi.y * spatialScale); //roi_start_h
rois_shr[offset + 2] = round(roi.z * spatialScale) - round(roi.x * spatialScale); //roi_length_w
rois_shr[offset + 3] = round(roi.w * spatialScale) - round(roi.y * spatialScale); // roi_length_h
}
// NC/xHW
int fmapOffset = blockIdx.x / fmapStep * H * W * fmapStep + blockIdx.x % fmapStep;
// Assumes #CTAs is just enough to cover all channels of all blocks
const DATA_T* bottom_data_offset = featureMap + fmapOffset;
if (FM_IN_SMEM)
{
// load the current channel to the shared memory
for (int j = threadIdx.x; j < H * W; j += blockDim.x)
{
feature_shr[j] = bottom_data_offset[j * fmapStep];
}
}
__syncthreads();
for (int j = threadIdx.x; j < ROICount; j += blockDim.x)
{
const int offset = j << 2;
// Force malformed ROIs to be 1x1
int roi_start_w = rois_shr[offset];
int roi_start_h = rois_shr[offset + 1];
int roi_width = max(rois_shr[offset + 2] + 1, 1);
int roi_height = max(rois_shr[offset + 3] + 1, 1);
float bin_size_h = static_cast<float>(roi_height) / static_cast<float>(poolingH);
float bin_size_w = static_cast<float>(roi_width) / static_cast<float>(poolingW);
for (int ph = 0; ph < poolingH; ++ph)
{
for (int pw = 0; pw < poolingW; ++pw)
{
int hstart = static_cast<int>(floor(static_cast<float>(ph) * bin_size_h));
int wstart = static_cast<int>(floor(static_cast<float>(pw) * bin_size_w));
int hend = static_cast<int>(ceil(static_cast<float>(ph + 1) * bin_size_h));
int wend = static_cast<int>(ceil(static_cast<float>(pw + 1) * bin_size_w));
// Add roi offsets and clip to input boundaries
// In fact, clipping should be done in the RPN, but just in case...
hstart = min(max(hstart + roi_start_h, 0), H);
hend = min(max(hend + roi_start_h, 0), H);
wstart = min(max(wstart + roi_start_w, 0), W);
wend = min(max(wend + roi_start_w, 0), W);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
DATA_T maxval = is_empty ? 0 : -getMax<DATA_T>();
int maxId = -1;
DATA_T data = 0;
for (int h = hstart; h < hend; ++h)
{
for (int w = wstart; w < wend; ++w)
{
int index = h * W + w;
if (FM_IN_SMEM)
{
data = feature_shr[index];
}
else
{
data = bottom_data_offset[index * fmapStep];
}
if (data > maxval)
{
maxval = data;
maxId = index;
}
}
}
top[(((batch * ROICount + j) * C + channel) * poolingH + ph) * poolingW + pw] = maxval;
if (!INFER_ONLY)
{
maxIds[(((batch * ROICount + j) * C + channel) * poolingH + ph) * poolingW + pw] = maxId;
}
} //for:pw
} //for:ph
} // for:j
}
template <typename DATA_T, DLayout_t DATA_L, typename ROI_T, bool INFER_ONLY>
pluginStatus_t ROIPoolingForwardKernelAlignedLauncher(cudaStream_t stream,
const int R, // TOTAL number of rois -> ~nmsMaxOut * N
const int N, // Batch size
const int C, // Channels
const int H, // Input feature map H
const int W, // Input feature map W
const int poolingH, // Output feature map H
const int poolingW, // Output feature map W
const float spatialScale, const void* rois, const void* featureMap, void* top, int* maxIds, size_t deviceSmemSize)
{
size_t roiShmemSize = (R / N) * 4 * sizeof(ROI_T);
#if ROIPOOLING_FEATURE_MAP_USE_SHMEM
size_t shmemSize = H * W * sizeof(DATA_T) + roiShmemSize;
const bool fmap_in_shmem = true;
#else
size_t shmemSize = roiShmemSize;
const bool fmap_in_shmem = false;
#endif
if (shmemSize > deviceSmemSize)
{
return STATUS_BAD_PARAM;
}
// in the aligned version of ROI Pooling R should always be a multiple of N
PLUGIN_ASSERT(R % N == 0);
// NC/xHW
int32_t fmapStep = 1;
switch(DATA_L)
{
case NCHW: fmapStep = 1; break;
case NC4HW:
fmapStep = 4;
PLUGIN_ASSERT((N * C) % 4 == 0);
break;
case NC32HW:
fmapStep = 32;
PLUGIN_ASSERT((N * C) % 32 == 0);
break;
default: PLUGIN_ASSERT(false);
}
if (shmemSize > 48 * 1024)
{
PLUGIN_CHECK(cudaFuncSetAttribute(&ROIPoolingForwardKernelAligned<DATA_T, ROI_T, INFER_ONLY, true>,
cudaFuncAttributeMaxDynamicSharedMemorySize, shmemSize));
}
ROIPoolingForwardKernelAligned<DATA_T, ROI_T, INFER_ONLY, fmap_in_shmem><<<N * C, 256, shmemSize, stream>>>(R / N,
(const ROI_T*) rois,
N, // feature map size
C, // feature map size
H, // feature map size
W, // feature map size
(const DATA_T*) featureMap, poolingH, poolingW, spatialScale, (DATA_T*) top, maxIds, fmapStep);
CSC(cudaGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
// ROI POOLING LAUNCH CONFIG
typedef pluginStatus_t (*roiFwd)(cudaStream_t,
const int, //R, // TOTAL number of rois -> ~nmsMaxOut * N
const int, //N, // Batch size
const int, //C, // Channels
const int, //H, // Input feature map H
const int, //W, // Input feature map W
const int, //poolingH, // Output feature map H
const int, //poolingW, // Output feature map W
const float, //spatialScale,
const void*, //rois,
const void*, //featureMap,
void*, //top
int*, //maxIds
size_t); //device shmem size
// struct
struct roiFwdLaunchConfig
{
DataType t_rois;
DataType t_featureMap;
DLayout_t l_featureMap;
DataType t_top;
DLayout_t l_top;
bool inferOnly;
roiFwd function;
roiFwdLaunchConfig(DataType t_rois,
DataType t_featureMap,
DLayout_t l_featureMap,
DataType t_top,
DLayout_t l_top,
bool inferOnly)
: t_rois(t_rois)
, t_featureMap(t_featureMap)
, l_featureMap(l_featureMap)
, t_top(t_top)
, l_top(l_top)
, inferOnly(inferOnly)
{
}
roiFwdLaunchConfig(DataType t_rois,
DataType t_featureMap,
DLayout_t l_featureMap,
DataType t_top,
DLayout_t l_top,
bool inferOnly,
roiFwd function)
: t_rois(t_rois)
, t_featureMap(t_featureMap)
, l_featureMap(l_featureMap)
, t_top(t_top)
, l_top(l_top)
, inferOnly(inferOnly)
, function(function)
{
}
bool operator==(const roiFwdLaunchConfig& other)
{
return (t_rois == other.t_rois)
&& (t_featureMap == other.t_featureMap)
&& (l_featureMap == other.l_featureMap)
&& (t_top == other.t_top)
&& (l_top == other.l_top)
&& (inferOnly == other.inferOnly);
}
};
#define FLOAT32 nvinfer1::DataType::kFLOAT
#define INT8 nvinfer1::DataType::kINT8
static std::array<roiFwdLaunchConfig, 6> roiFwdLCOptions = {
roiFwdLaunchConfig(FLOAT32, FLOAT32, NCHW, FLOAT32, NCHW, true, ROIPoolingForwardKernelAlignedLauncher<float, NCHW, float, true>),
roiFwdLaunchConfig(FLOAT32, FLOAT32, NCHW, FLOAT32, NCHW, false, ROIPoolingForwardKernelAlignedLauncher<float, NCHW, float, false>),
roiFwdLaunchConfig(FLOAT32, INT8, NCHW, INT8, NCHW, true, ROIPoolingForwardKernelAlignedLauncher<int8_t, NCHW, float, true>),
roiFwdLaunchConfig(FLOAT32, INT8, NC4HW, INT8, NCHW, true, ROIPoolingForwardKernelAlignedLauncher<int8_t, NC4HW, float, true>),
roiFwdLaunchConfig(FLOAT32, INT8, NC32HW, INT8, NCHW, true, ROIPoolingForwardKernelAlignedLauncher<int8_t, NC32HW, float, true>),
roiFwdLaunchConfig(FLOAT32, FLOAT32, NC4HW, FLOAT32, NCHW, true, ROIPoolingForwardKernelAlignedLauncher<float, NC4HW, float, true>)};
// ROI INFERENCE
pluginStatus_t roiInference(cudaStream_t stream,
const int R, // TOTAL number of rois -> ~nmsMaxOut * N
const int N, // Batch size
const int C, // Channels
const int H, // Input feature map H
const int W, // Input feature map W
const int poolingH, // Output feature map H
const int poolingW, // Output feature map W
const float spatialScale,
const nvinfer1::DataType t_rois,
const void* rois,
const nvinfer1::DataType t_featureMap,
const DLayout_t l_featureMap,
const void* featureMap,
const nvinfer1::DataType t_top,
const DLayout_t l_top,
void* top,
size_t deviceSmemSize)
{
if (featureMap == NULL || rois == NULL || top == NULL)
{
return STATUS_BAD_PARAM;
}
DEBUG_PRINTF("&&&& ROIS %u\n", hash(rois, R * 4 * sizeof(float)));
DEBUG_PRINTF("&&&& FMAP %u\n", hash(featureMap, N * C * H * W * sizeof(float)));
roiFwdLaunchConfig rflc = roiFwdLaunchConfig(t_rois, t_featureMap, l_featureMap, t_top, l_top, true);
ASSERT_PARAM(R > 0);
for (unsigned i = 0; i < roiFwdLCOptions.size(); i++)
{
if (rflc == roiFwdLCOptions[i])
{
DEBUG_PRINTF("$$$$ ROI KERNEL %d\n", i);
return roiFwdLCOptions[i].function(stream,
R, N, C, H, W, poolingH, poolingW,
spatialScale, rois, featureMap, top, NULL, deviceSmemSize);
}
}
return STATUS_BAD_PARAM;
}
|
6ff866e3290039947352d8de75a2e03e44f15c84.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
hipError_t err = hipGetLastError();
if (hipSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
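// Typical call site (illustrative):
// checkCUDAErrorFn("kernScatter failed", __FILE__, __LINE__);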
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
// TODO
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n)
return;
if (idata[index])
bools[index] = 1;
else
bools[index] = 0;
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
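// Worked example (illustrative): idata = {3, 0, 7, 0, 4} gives
// bools = {1, 0, 1, 0, 1}, an exclusive scan of bools yields
// indices = {0, 1, 1, 2, 2}, and scatter produces odata = {3, 7, 4}.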
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
// TODO
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n)
return;
if (bools[index])
odata[indices[index]] = idata[index];
}
}
}
| 6ff866e3290039947352d8de75a2e03e44f15c84.cu | #include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
// TODO
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n)
return;
if (idata[index])
bools[index] = 1;
else
bools[index] = 0;
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
// TODO
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n)
return;
if (bools[index])
odata[indices[index]] = idata[index];
}
}
}
|
46ee01bd318013abd27a84f34652d7d039ed4b08.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file : activationf.cu
* @brief : activation functions content/source file in CUDA C++14,
* @author : Ernest Yeung <[email protected]>
* @date : 20171020
* @ref :
*
* If you find this code useful, feel free to donate directly and easily at this direct PayPal link:
*
* https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=ernestsaveschristmas%2bpaypal%40gmail%2ecom&lc=US&item_name=ernestyalumni&currency_code=USD&bn=PP%2dDonationsBF%3abtn_donateCC_LG%2egif%3aNonHosted
*
* which won't go through a 3rd. party such as indiegogo, kickstarter, patreon.
* Otherwise, I receive emails and messages on how all my (free) material on
* physics, math, and engineering have helped students with their studies,
* and I know what it's like to not have money as a student, but love physics
* (or math, sciences, etc.), so I am committed to keeping all my material
* open-source and free, whether or not
* sufficiently crowdfunded, under the open-source MIT license:
* feel free to copy, edit, paste, make your own versions, share, use as you wish.
* Just don't be an asshole and not give credit where credit is due.
* Peace out, never give up! -EY
*
* */
/*
* COMPILATION TIP
* nvcc -std=c++14 -lcublas -dc Axon.cu -o Axon.o
*
* */
#include "activationf.h"
// 0
__global__ void identity_kernel(const int SIZE, float*z) {
}
__global__ void D_identity_kernel(const int SIZE, const float* z, float* d_a) {
int kx = threadIdx.x + blockDim.x * blockIdx.x;
if (kx >= SIZE) { return; }
for (int tid=kx; tid < SIZE; tid += gridDim.x*blockDim.x)
{
d_a[tid]= 1.0f;
}
}
// 1
__global__ void sigmoid_kernel(const int SIZE, float*z) {
int kx = threadIdx.x + blockDim.x * blockIdx.x;
if (kx >= SIZE) { return; }
for (int tid=kx; tid < SIZE; tid += gridDim.x*blockDim.x)
{
float a_val = z[tid];
a_val = 1.f/(1.f + expf(-a_val));
z[tid]=a_val;
}
}
__global__ void D_sigmoid_kernel(const int SIZE, const float* z, float* d_a) {
int kx = threadIdx.x + blockDim.x * blockIdx.x;
if (kx >= SIZE) { return; }
for (int tid=kx; tid < SIZE; tid += gridDim.x*blockDim.x)
{
float a_val = z[tid];
a_val = 1.f/(1.f + expf(-a_val));
a_val = a_val * ( 1.0f - a_val );
d_a[tid]=a_val;
}
}
// 2
__global__ void tanh_kernel(const int SIZE, float*z)
{
int kx = threadIdx.x + blockDim.x * blockIdx.x;
if (kx >= SIZE) { return; }
for (int tid=kx; tid < SIZE; tid += gridDim.x*blockDim.x)
{
float a_val = z[tid];
a_val = tanhf(a_val);
z[tid] = a_val;
}
}
__global__ void D_tanh_kernel(const int SIZE, const float* z, float*d_a) {
int kx = threadIdx.x + blockDim.x * blockIdx.x;
if (kx >= SIZE) { return; }
for (int tid=kx; tid < SIZE; tid += gridDim.x*blockDim.x)
{
float a_val = z[tid];
a_val = tanhf(a_val);
a_val = 1.0f - (a_val)*a_val;
d_a[tid] = a_val;
}
}
__global__ void arctan_kernel(const int SIZE, float*z) {
int kx = threadIdx.x + blockDim.x * blockIdx.x;
if (kx >= SIZE) { return; }
for (int tid=kx; tid < SIZE; tid += gridDim.x*blockDim.x)
{
float a_val = z[tid];
a_val = atanf(a_val);
z[tid] = a_val;
}
}
__global__ void D_arctan_kernel(const int SIZE, const float* z, float*d_a) {
int kx = threadIdx.x + blockDim.x * blockIdx.x;
if (kx >= SIZE) { return; }
for (int tid=kx; tid < SIZE; tid += gridDim.x*blockDim.x)
{
float a_val = z[tid];
a_val = 1.0f / ( 1.0f + a_val*a_val);
d_a[tid] = a_val;
}
}
__global__ void ReLU_kernel(const int SIZE, float*z) {
int kx = threadIdx.x + blockDim.x * blockIdx.x;
if (kx >= SIZE) { return; }
for (int tid=kx; tid < SIZE; tid += gridDim.x*blockDim.x)
{
float a_val = z[tid];
if (a_val < 0.f) {
z[tid] = 0.f;
}
}
}
__global__ void D_ReLU_kernel(const int SIZE, const float*z, float*d_a) {
int kx = threadIdx.x + blockDim.x * blockIdx.x;
if (kx >= SIZE) { return; }
for (int tid=kx; tid < SIZE; tid += gridDim.x*blockDim.x)
{
float a_val = z[tid];
if (a_val < 0.f) {
d_a[tid] = 0.f;
} else {
d_a[tid] = 1.0f;
}
}
}
/**
* @fn Gaussian_kernel
* @param c
* @param sigma_dev
* @note exp(-(z-c)^2 / (2.f * sigma_dev*sigma_dev) )
* */
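// Note (observation added for clarity): the documented parameters c and sigma_dev
// are not actual kernel arguments here; the body below fixes c = 0.f and
// sigma_dev = 1.f, so this computes exp(-z^2 / 2).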
__global__ void Gaussian_kernel(const int SIZE, float* z) {
int kx = threadIdx.x + blockDim.x * blockIdx.x;
if (kx >= SIZE) { return; }
for (int tid=kx; tid < SIZE; tid += gridDim.x*blockDim.x)
{
float a_val = z[tid];
a_val = expf( -1.0f * ( a_val - 0.f)*(a_val-0.f) / 2.0f / (1.f*1.f) ) ;
z[tid] = a_val;
}
}
/**
* @fn D_Gaussian_kernel
* @brief derivative of Gaussian_kernel
* @param c
* @param sigma_dev
* @note -(z-c) / ( sigma_dev*sigma_dev) * exp(-(z-c)^2 / (2.f * sigma_dev*sigma_dev) )
* */
__global__ void D_Gaussian_kernel(const int SIZE, const float* z, float*d_a)
{
int kx = threadIdx.x + blockDim.x * blockIdx.x;
if (kx >= SIZE) { return; }
for (int tid=kx; tid < SIZE; tid += gridDim.x*blockDim.x)
{
float a_val = z[tid];
a_val = -1.0f * (a_val - 0.f)/(1.f*1.f) *
expf( -1.0f * ( a_val - 0.f)*(a_val-0.f) / 2.0f / (1.f*1.f) ) ;
d_a[tid] = a_val;
}
}
| 46ee01bd318013abd27a84f34652d7d039ed4b08.cu | /**
* @file : activationf.cu
* @brief : activation functions content/source file in CUDA C++14,
* @author : Ernest Yeung <[email protected]>
* @date : 20171020
* @ref :
*
* If you find this code useful, feel free to donate directly and easily at this direct PayPal link:
*
* https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=ernestsaveschristmas%2bpaypal%40gmail%2ecom&lc=US&item_name=ernestyalumni&currency_code=USD&bn=PP%2dDonationsBF%3abtn_donateCC_LG%2egif%3aNonHosted
*
* which won't go through a 3rd. party such as indiegogo, kickstarter, patreon.
* Otherwise, I receive emails and messages on how all my (free) material on
* physics, math, and engineering have helped students with their studies,
* and I know what it's like to not have money as a student, but love physics
* (or math, sciences, etc.), so I am committed to keeping all my material
* open-source and free, whether or not
* sufficiently crowdfunded, under the open-source MIT license:
* feel free to copy, edit, paste, make your own versions, share, use as you wish.
* Just don't be an asshole and not give credit where credit is due.
* Peace out, never give up! -EY
*
* */
/*
* COMPILATION TIP
* nvcc -std=c++14 -lcublas -dc Axon.cu -o Axon.o
*
* */
#include "activationf.h"
// 0
__global__ void identity_kernel(const int SIZE, float*z) {
}
__global__ void D_identity_kernel(const int SIZE, const float* z, float* d_a) {
int kx = threadIdx.x + blockDim.x * blockIdx.x;
if (kx >= SIZE) { return; }
for (int tid=kx; tid < SIZE; tid += gridDim.x*blockDim.x)
{
d_a[tid]= 1.0f;
}
}
// 1
__global__ void sigmoid_kernel(const int SIZE, float*z) {
int kx = threadIdx.x + blockDim.x * blockIdx.x;
if (kx >= SIZE) { return; }
for (int tid=kx; tid < SIZE; tid += gridDim.x*blockDim.x)
{
float a_val = z[tid];
a_val = 1.f/(1.f + expf(-a_val));
z[tid]=a_val;
}
}
__global__ void D_sigmoid_kernel(const int SIZE, const float* z, float* d_a) {
int kx = threadIdx.x + blockDim.x * blockIdx.x;
if (kx >= SIZE) { return; }
for (int tid=kx; tid < SIZE; tid += gridDim.x*blockDim.x)
{
float a_val = z[tid];
a_val = 1.f/(1.f + expf(-a_val));
a_val = a_val * ( 1.0f - a_val );
d_a[tid]=a_val;
}
}
// 2
__global__ void tanh_kernel(const int SIZE, float*z)
{
int kx = threadIdx.x + blockDim.x * blockIdx.x;
if (kx >= SIZE) { return; }
for (int tid=kx; tid < SIZE; tid += gridDim.x*blockDim.x)
{
float a_val = z[tid];
a_val = tanhf(a_val);
z[tid] = a_val;
}
}
__global__ void D_tanh_kernel(const int SIZE, const float* z, float*d_a) {
int kx = threadIdx.x + blockDim.x * blockIdx.x;
if (kx >= SIZE) { return; }
for (int tid=kx; tid < SIZE; tid += gridDim.x*blockDim.x)
{
float a_val = z[tid];
a_val = tanhf(a_val);
a_val = 1.0f - (a_val)*a_val;
d_a[tid] = a_val;
}
}
__global__ void arctan_kernel(const int SIZE, float*z) {
int kx = threadIdx.x + blockDim.x * blockIdx.x;
if (kx >= SIZE) { return; }
for (int tid=kx; tid < SIZE; tid += gridDim.x*blockDim.x)
{
float a_val = z[tid];
a_val = atanf(a_val);
z[tid] = a_val;
}
}
__global__ void D_arctan_kernel(const int SIZE, const float* z, float*d_a) {
int kx = threadIdx.x + blockDim.x * blockIdx.x;
if (kx >= SIZE) { return; }
for (int tid=kx; tid < SIZE; tid += gridDim.x*blockDim.x)
{
float a_val = z[tid];
a_val = 1.0f / ( 1.0f + a_val*a_val);
d_a[tid] = a_val;
}
}
__global__ void ReLU_kernel(const int SIZE, float*z) {
int kx = threadIdx.x + blockDim.x * blockIdx.x;
if (kx >= SIZE) { return; }
for (int tid=kx; tid < SIZE; tid += gridDim.x*blockDim.x)
{
float a_val = z[tid];
if (a_val < 0.f) {
z[tid] = 0.f;
}
}
}
__global__ void D_ReLU_kernel(const int SIZE, const float*z, float*d_a) {
int kx = threadIdx.x + blockDim.x * blockIdx.x;
if (kx >= SIZE) { return; }
for (int tid=kx; tid < SIZE; tid += gridDim.x*blockDim.x)
{
float a_val = z[tid];
if (a_val < 0.f) {
d_a[tid] = 0.f;
} else {
d_a[tid] = 1.0f;
}
}
}
/**
* @fn Gaussian_kernel
* @param c
* @param sigma_dev
* @note exp(-(z-c)^2 / (2.f * sigma_dev*sigma_dev) )
* */
__global__ void Gaussian_kernel(const int SIZE, float* z) {
int kx = threadIdx.x + blockDim.x * blockIdx.x;
if (kx >= SIZE) { return; }
for (int tid=kx; tid < SIZE; tid += gridDim.x*blockDim.x)
{
float a_val = z[tid];
a_val = expf( -1.0f * ( a_val - 0.f)*(a_val-0.f) / 2.0f / (1.f*1.f) ) ;
z[tid] = a_val;
}
}
/**
* @fn D_Gaussian_kernel
* @brief derivative of Gaussian_kernel
* @param c
* @param sigma_dev
* @note -(z-c) / ( sigma_dev*sigma_dev) * exp(-(z-c)^2 / (2.f * sigma_dev*sigma_dev) )
* */
__global__ void D_Gaussian_kernel(const int SIZE, const float* z, float*d_a)
{
int kx = threadIdx.x + blockDim.x * blockIdx.x;
if (kx >= SIZE) { return; }
for (int tid=kx; tid < SIZE; tid += gridDim.x*blockDim.x)
{
float a_val = z[tid];
a_val = -1.0f * (a_val - 0.f)/(1.f*1.f) *
expf( -1.0f * ( a_val - 0.f)*(a_val-0.f) / 2.0f / (1.f*1.f) ) ;
d_a[tid] = a_val;
}
}
|
6973b7774cbab3d1473ca82dc01696b433affd3d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/detail/TensorInfo.cuh>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/LegacyTHFunctionsCUDA.h>
#include <ATen/native/Resize.h>
#include <ATen/native/hip/SortingCommon.cuh>
#include <ATen/native/hip/SortingRadixSelect.cuh>
#include <ATen/native/hip/SortUtils.cuh>
#include <c10/macros/Macros.h>
using namespace at::native;
namespace at {
namespace native {
namespace {
template <typename T, typename IndexType, int Dim, bool Order>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void gatherTopK(at::cuda::detail::TensorInfo<T, IndexType> input,
IndexType inputSliceSize,
IndexType outputSliceSize, // aka `k`
IndexType numInputSlices,
IndexType inputWithinSliceStride,
at::cuda::detail::TensorInfo<T, IndexType> topK,
IndexType numTopKSlices,
IndexType topKWithinSliceStride,
at::cuda::detail::TensorInfo<int64_t, IndexType> indices,
IndexType indicesWithinSliceStride) {
// Indices are limited to integer fp precision, so counts can fit in
// int32, regardless of IndexType
#ifdef __HIP_PLATFORM_HCC__
__shared__ int smem[64];
#else
__shared__ int smem[32]; // one per each warp, up to warp limit
#endif
IndexType slice = getLinearBlockId<IndexType>();
if (slice >= numInputSlices) {
return;
}
// Find the start offset for our slice
IndexType sliceStartIndex =
at::cuda::detail::IndexToOffset<T, IndexType, Dim>::get(slice, input);
IndexType topKSliceStartIndex =
at::cuda::detail::IndexToOffset<T, IndexType, Dim>::get(slice, topK);
IndexType indicesSliceStartIndex =
at::cuda::detail::IndexToOffset<int64_t, IndexType, Dim>::get(slice, indices);
T* inputSliceStart = &input.data[sliceStartIndex];
T* topKSliceStart = &topK.data[topKSliceStartIndex];
int64_t* indicesSliceStart = &indices.data[indicesSliceStartIndex];
// Find the k-th highest element in our input
T topKValue = ScalarConvert<int, T>::to(0);
radixSelect<T, typename TopKTypeConfig<T>::RadixType, IndexType, Order>(
inputSliceStart, outputSliceSize,
inputSliceSize, inputWithinSliceStride,
smem, &topKValue);
const auto topKConverted = at::native::TopKTypeConfig<T>::convert(topKValue);
// Every value that is strictly less/greater than `pattern`
// (depending on sort dir) in sorted int format is in the top-K.
// The top-K value itself might not be unique.
//
// Since there are a variable number of elements that we see that
// are within the top-k, we don't know at what index to write out
// the resulting values.
// In order to get this, we perform an exclusive prefix sum of
// `hasTopK`. This will return the resulting index into which we
// need to write the result, if a thread has a result.
// All threads need to participate in the loop and the prefix sum,
// but not necessarily in the load; hence loop bounds being rounded
// up to a multiple of the block dim.
IndexType numIterations = THCRoundUp(inputSliceSize, (IndexType) blockDim.x);
IndexType writeIndexStart = 0;
for (IndexType i = threadIdx.x; i < numIterations; i += blockDim.x) {
bool inRange = (i < inputSliceSize);
T v =
inRange ? doLdg(&inputSliceStart[i * inputWithinSliceStride]) : ScalarConvert<int, T>::to(0);
const auto convertedV = at::native::TopKTypeConfig<T>::convert(v);
bool hasTopK;
if (Order) {
hasTopK = inRange && (convertedV > topKConverted);
} else {
hasTopK = inRange && (convertedV < topKConverted);
}
int index;
int carry;
exclusiveBinaryPrefixScan<int, true>(smem, hasTopK, &index, &carry, AddOp<int>());
if (hasTopK) {
int writeIndex = writeIndexStart + index;
CUDA_KERNEL_ASSERT(writeIndex < outputSliceSize);
IndexType topKOffset = writeIndex * topKWithinSliceStride;
IndexType indexOffset = writeIndex * indicesWithinSliceStride;
topKSliceStart[topKOffset] = v;
indicesSliceStart[indexOffset] = i;
}
writeIndexStart += carry;
}
// We need to fill in the rest with actual == top-K values.
// The number that we need is outputSliceSize -
// writeIndexStart. There might be more than that number available,
// in which case we have to choose the first seen set. We do this
// via a prefix sum to calculate indices for writing results.
CUDA_KERNEL_ASSERT(outputSliceSize >= writeIndexStart);
IndexType topKRemaining = (outputSliceSize - writeIndexStart);
for (IndexType i = threadIdx.x; i < numIterations; i += blockDim.x) {
bool inRange = (i < inputSliceSize);
T v =
inRange ? doLdg(&inputSliceStart[i * inputWithinSliceStride]) : ScalarConvert<int, T>::to(0);
const auto convertedV = at::native::TopKTypeConfig<T>::convert(v);
bool hasTopK = inRange && (convertedV == topKConverted);
int index;
int carry;
exclusiveBinaryPrefixScan<int, true>(smem, hasTopK, &index, &carry, AddOp<int>());
if (hasTopK && index < topKRemaining) {
int writeIndex = writeIndexStart + index;
CUDA_KERNEL_ASSERT(writeIndex < outputSliceSize);
IndexType topKOffset = writeIndex * topKWithinSliceStride;
IndexType indexOffset = writeIndex * indicesWithinSliceStride;
topKSliceStart[topKOffset] = v;
indicesSliceStart[indexOffset] = i;
}
if (carry >= topKRemaining) {
break;
}
topKRemaining -= carry;
writeIndexStart += carry;
}
};
} // namespace
TORCH_IMPL_FUNC(topk_out_cuda)
(const Tensor& self,
int64_t k, int64_t dim, bool largest, bool sorted,
const Tensor& values,
const Tensor& indices) {
TensorArg topK_arg{values, "topK", 1}, indices_arg{indices, "indices", 2}, input_arg{self, "self", 3};
checkAllSameGPU("topk_out_cuda", {topK_arg, indices_arg, input_arg});
dim = at::maybe_wrap_dim(dim, self);
int numDims = self.dim();
numDims = numDims == 0 ? 1 : numDims;
TORCH_CHECK(numDims <= MAX_DIMS, "input tensor has too many dimensions");
int64_t sliceSize = self.dim() == 0 ? 1 : self.size(dim);
Tensor input = self.contiguous();
// If k is 0 the result is an empty tensor, so we don't need to launch a kernel.
if (k == 0) {
return;
}
// static_cast is required to ensure that the correct type (INDEX_T)
// is provided to the kernel for the arguments.
#define RUN_K(INDEX_T, DIM, DIR) \
hipLaunchKernelGGL(( gatherTopK<scalar_t, INDEX_T, DIM, DIR>) \
, dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \
inputInfo, \
static_cast<INDEX_T>(sliceSize), \
static_cast<INDEX_T>(k), \
static_cast<INDEX_T>(inputSlices), \
/* The actual dimension that the k-selection is running in */ \
/* may have changed from collapseDims() */ \
static_cast<INDEX_T>(inputInfo.strides[collapseInputDim]), \
topKInfo, \
static_cast<INDEX_T>(topKSlices), \
static_cast<INDEX_T>(topKInfo.strides[collapseTopKDim]), \
indicesInfo, \
static_cast<INDEX_T>(indicesInfo.strides[collapseIndicesDim])); \
C10_HIP_KERNEL_LAUNCH_CHECK();
#define RUN_DIR(INDEX_T, DIM) \
if (largest) { \
RUN_K(INDEX_T, DIM, true); \
} else { \
RUN_K(INDEX_T, DIM, false); \
}
#define RUN_DIM(INDEX_T) \
if (allDims == 1) { \
RUN_DIR(INDEX_T, 1); \
} else if (allDims == 2) { \
RUN_DIR(INDEX_T, 2); \
} else if (allDims == 3) { \
RUN_DIR(INDEX_T, 3); \
} else { \
RUN_DIR(INDEX_T, -1); \
}
#define RUN_T(INDEX_T) \
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "topk_out_cuda", [&] { \
at::cuda::detail::TensorInfo<scalar_t, INDEX_T> inputInfo = \
at::cuda::detail::getTensorInfo<scalar_t, INDEX_T>(input); \
at::cuda::detail::TensorInfo<scalar_t, INDEX_T> topKInfo = \
at::cuda::detail::getTensorInfo<scalar_t, INDEX_T>(values); \
at::cuda::detail::TensorInfo<int64_t, INDEX_T> indicesInfo = \
at::cuda::detail::getTensorInfo<int64_t, INDEX_T>(indices); \
/* tensorInfoLegacyIfScalar*/ \
if (!input.dim()) { \
inputInfo.dims = 1; \
inputInfo.sizes[0] = 1; \
inputInfo.strides[0] = 1; \
topKInfo.dims = 1; \
topKInfo.sizes[0] = 1; \
topKInfo.strides[0] = 1; \
indicesInfo.dims = 1; \
indicesInfo.sizes[0] = 1; \
indicesInfo.strides[0] = 1; \
} \
/* We use these structures solely to find the offset to */ \
/* each slice we are operating on */ \
inputInfo.sizes[dim] = 1; \
topKInfo.sizes[dim] = 1; \
indicesInfo.sizes[dim] = 1; \
/* Collapse all other dims */ \
int collapseInputDim = inputInfo.collapseDims(dim); \
int collapseTopKDim = topKInfo.collapseDims(dim); \
int collapseIndicesDim = indicesInfo.collapseDims(dim); \
int64_t inputSlices = 1; \
for (int i = 0; i < inputInfo.dims; ++i) { \
inputSlices *= inputInfo.sizes[i]; \
} \
int64_t topKSlices = 1; \
for (int i = 0; i < topKInfo.dims; ++i) { \
topKSlices *= topKInfo.sizes[i]; \
} \
\
dim3 grid; \
TORCH_INTERNAL_ASSERT(getGridFromTiles(inputSlices, grid), "Too many slices to sort"); \
\
dim3 block(::min(at::cuda::ATenCeilDiv(sliceSize, (int64_t) C10_WARP_SIZE)*(int64_t) C10_WARP_SIZE, (int64_t) 1024)); \
\
/* This is used as a template parameter to calculate indices. */ \
/* We only specialize it if all collapsed dim sizes are the */ \
/* same; otherwise, we use -1 which is the specialization */ \
/* parameter for arbitrary dimensions */ \
int allDims = inputInfo.dims; \
if (topKInfo.dims != allDims || indicesInfo.dims != allDims) { \
allDims = -1; \
} \
\
RUN_DIM(INDEX_T); \
});
// the below is safe with 0-dimensional tensors because it is based on
// TensorInfo which implicitly expands to 1-dimensional.
if (input.numel() > 0) {
// Based on required index size, run the algorithm with the
// appropriate index type
if (at::cuda::detail::canUse32BitIndexMath(input) &&
at::cuda::detail::canUse32BitIndexMath(values) &&
at::cuda::detail::canUse32BitIndexMath(indices)) {
RUN_T(uint32_t);
} else {
RUN_T(uint64_t);
}
}
#undef RUN_T
#undef RUN_DIM
#undef RUN_DIR
#undef RUN_K
// Sort the results if the user wants them sorted, since our
// selection routine does not ensure sorting
if (sorted && values.numel() > 1) {
if (should_use_small_sort(values, dim)) {
// This avoids any memory allocations and performs all sorting
// work inplace along the slice
sortKeyValueInplace(values, indices, dim, largest);
} else {
// Depend upon the backup sort that returns indices, which we
// can use in conjunction with gather to produce the original
// indices.
// This is not the most efficient implementation, especially since
// there are memory allocations performed here. If the user desires
// greater performance, they should torch.gather() the results
// themselves using the reported indices, providing previously
// allocated tensors to receive the results.
Tensor sortedIndices = at::empty_like(indices);
// FIXME: remove const_cast once sort_out cuda is ported to structured
sort_out_cuda(const_cast<Tensor&>(values), dim, largest, const_cast<Tensor&>(values), const_cast<Tensor&>(sortedIndices));
indices.copy_(indices.gather(dim, sortedIndices));
}
}
}
} // at::native
} // at
| 6973b7774cbab3d1473ca82dc01696b433affd3d.cu | #include <ATen/ATen.h>
#include <ATen/cuda/detail/TensorInfo.cuh>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/LegacyTHFunctionsCUDA.h>
#include <ATen/native/Resize.h>
#include <ATen/native/cuda/SortingCommon.cuh>
#include <ATen/native/cuda/SortingRadixSelect.cuh>
#include <ATen/native/cuda/SortUtils.cuh>
#include <c10/macros/Macros.h>
using namespace at::native;
namespace at {
namespace native {
namespace {
template <typename T, typename IndexType, int Dim, bool Order>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void gatherTopK(at::cuda::detail::TensorInfo<T, IndexType> input,
IndexType inputSliceSize,
IndexType outputSliceSize, // aka `k`
IndexType numInputSlices,
IndexType inputWithinSliceStride,
at::cuda::detail::TensorInfo<T, IndexType> topK,
IndexType numTopKSlices,
IndexType topKWithinSliceStride,
at::cuda::detail::TensorInfo<int64_t, IndexType> indices,
IndexType indicesWithinSliceStride) {
// Indices are limited to integer fp precision, so counts can fit in
// int32, regardless of IndexType
#ifdef __HIP_PLATFORM_HCC__
__shared__ int smem[64];
#else
__shared__ int smem[32]; // one per each warp, up to warp limit
#endif
IndexType slice = getLinearBlockId<IndexType>();
if (slice >= numInputSlices) {
return;
}
// Find the start offset for our slice
IndexType sliceStartIndex =
at::cuda::detail::IndexToOffset<T, IndexType, Dim>::get(slice, input);
IndexType topKSliceStartIndex =
at::cuda::detail::IndexToOffset<T, IndexType, Dim>::get(slice, topK);
IndexType indicesSliceStartIndex =
at::cuda::detail::IndexToOffset<int64_t, IndexType, Dim>::get(slice, indices);
T* inputSliceStart = &input.data[sliceStartIndex];
T* topKSliceStart = &topK.data[topKSliceStartIndex];
int64_t* indicesSliceStart = &indices.data[indicesSliceStartIndex];
// Find the k-th highest element in our input
T topKValue = ScalarConvert<int, T>::to(0);
radixSelect<T, typename TopKTypeConfig<T>::RadixType, IndexType, Order>(
inputSliceStart, outputSliceSize,
inputSliceSize, inputWithinSliceStride,
smem, &topKValue);
const auto topKConverted = at::native::TopKTypeConfig<T>::convert(topKValue);
// Every value that is strictly less/greater than `pattern`
// (depending on sort dir) in sorted int format is in the top-K.
// The top-K value itself might not be unique.
//
// Since there are a variable number of elements that we see that
// are within the top-k, we don't know at what index to write out
// the resulting values.
// In order to get this, we perform an exclusive prefix sum of
// `hasTopK`. This will return the resulting index into which we
// need to write the result, if a thread has a result.
// All threads need to participate in the loop and the prefix sum,
// but not necessarily in the load; hence loop bounds being rounded
// up to a multiple of the block dim.
IndexType numIterations = THCRoundUp(inputSliceSize, (IndexType) blockDim.x);
IndexType writeIndexStart = 0;
for (IndexType i = threadIdx.x; i < numIterations; i += blockDim.x) {
bool inRange = (i < inputSliceSize);
T v =
inRange ? doLdg(&inputSliceStart[i * inputWithinSliceStride]) : ScalarConvert<int, T>::to(0);
const auto convertedV = at::native::TopKTypeConfig<T>::convert(v);
bool hasTopK;
if (Order) {
hasTopK = inRange && (convertedV > topKConverted);
} else {
hasTopK = inRange && (convertedV < topKConverted);
}
int index;
int carry;
exclusiveBinaryPrefixScan<int, true>(smem, hasTopK, &index, &carry, AddOp<int>());
if (hasTopK) {
int writeIndex = writeIndexStart + index;
CUDA_KERNEL_ASSERT(writeIndex < outputSliceSize);
IndexType topKOffset = writeIndex * topKWithinSliceStride;
IndexType indexOffset = writeIndex * indicesWithinSliceStride;
topKSliceStart[topKOffset] = v;
indicesSliceStart[indexOffset] = i;
}
writeIndexStart += carry;
}
// We need to fill in the rest with actual == top-K values.
// The number that we need is outputSliceSize -
// writeIndexStart. There might be more than that number available,
// in which case we have to choose the first seen set. We do this
// via a prefix sum to calculate indices for writing results.
CUDA_KERNEL_ASSERT(outputSliceSize >= writeIndexStart);
IndexType topKRemaining = (outputSliceSize - writeIndexStart);
for (IndexType i = threadIdx.x; i < numIterations; i += blockDim.x) {
bool inRange = (i < inputSliceSize);
T v =
inRange ? doLdg(&inputSliceStart[i * inputWithinSliceStride]) : ScalarConvert<int, T>::to(0);
const auto convertedV = at::native::TopKTypeConfig<T>::convert(v);
bool hasTopK = inRange && (convertedV == topKConverted);
int index;
int carry;
exclusiveBinaryPrefixScan<int, true>(smem, hasTopK, &index, &carry, AddOp<int>());
if (hasTopK && index < topKRemaining) {
int writeIndex = writeIndexStart + index;
CUDA_KERNEL_ASSERT(writeIndex < outputSliceSize);
IndexType topKOffset = writeIndex * topKWithinSliceStride;
IndexType indexOffset = writeIndex * indicesWithinSliceStride;
topKSliceStart[topKOffset] = v;
indicesSliceStart[indexOffset] = i;
}
if (carry >= topKRemaining) {
break;
}
topKRemaining -= carry;
writeIndexStart += carry;
}
};
} // namespace
TORCH_IMPL_FUNC(topk_out_cuda)
(const Tensor& self,
int64_t k, int64_t dim, bool largest, bool sorted,
const Tensor& values,
const Tensor& indices) {
TensorArg topK_arg{values, "topK", 1}, indices_arg{indices, "indices", 2}, input_arg{self, "self", 3};
checkAllSameGPU("topk_out_cuda", {topK_arg, indices_arg, input_arg});
dim = at::maybe_wrap_dim(dim, self);
int numDims = self.dim();
numDims = numDims == 0 ? 1 : numDims;
TORCH_CHECK(numDims <= MAX_DIMS, "input tensor has too many dimensions");
int64_t sliceSize = self.dim() == 0 ? 1 : self.size(dim);
Tensor input = self.contiguous();
// If k is 0 the result is an empty tensor, so we don't need to launch a kernel.
if (k == 0) {
return;
}
// static_cast is required to ensure that the correct type (INDEX_T)
// is provided to the kernel for the arguments.
#define RUN_K(INDEX_T, DIM, DIR) \
gatherTopK<scalar_t, INDEX_T, DIM, DIR> \
<<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>( \
inputInfo, \
static_cast<INDEX_T>(sliceSize), \
static_cast<INDEX_T>(k), \
static_cast<INDEX_T>(inputSlices), \
/* The actual dimension that the k-selection is running in */ \
/* may have changed from collapseDims() */ \
static_cast<INDEX_T>(inputInfo.strides[collapseInputDim]), \
topKInfo, \
static_cast<INDEX_T>(topKSlices), \
static_cast<INDEX_T>(topKInfo.strides[collapseTopKDim]), \
indicesInfo, \
static_cast<INDEX_T>(indicesInfo.strides[collapseIndicesDim])); \
C10_CUDA_KERNEL_LAUNCH_CHECK();
#define RUN_DIR(INDEX_T, DIM) \
if (largest) { \
RUN_K(INDEX_T, DIM, true); \
} else { \
RUN_K(INDEX_T, DIM, false); \
}
#define RUN_DIM(INDEX_T) \
if (allDims == 1) { \
RUN_DIR(INDEX_T, 1); \
} else if (allDims == 2) { \
RUN_DIR(INDEX_T, 2); \
} else if (allDims == 3) { \
RUN_DIR(INDEX_T, 3); \
} else { \
RUN_DIR(INDEX_T, -1); \
}
#define RUN_T(INDEX_T) \
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "topk_out_cuda", [&] { \
at::cuda::detail::TensorInfo<scalar_t, INDEX_T> inputInfo = \
at::cuda::detail::getTensorInfo<scalar_t, INDEX_T>(input); \
at::cuda::detail::TensorInfo<scalar_t, INDEX_T> topKInfo = \
at::cuda::detail::getTensorInfo<scalar_t, INDEX_T>(values); \
at::cuda::detail::TensorInfo<int64_t, INDEX_T> indicesInfo = \
at::cuda::detail::getTensorInfo<int64_t, INDEX_T>(indices); \
/* tensorInfoLegacyIfScalar*/ \
if (!input.dim()) { \
inputInfo.dims = 1; \
inputInfo.sizes[0] = 1; \
inputInfo.strides[0] = 1; \
topKInfo.dims = 1; \
topKInfo.sizes[0] = 1; \
topKInfo.strides[0] = 1; \
indicesInfo.dims = 1; \
indicesInfo.sizes[0] = 1; \
indicesInfo.strides[0] = 1; \
} \
/* We use these structures solely to find the offset to */ \
/* each slice we are operating on */ \
inputInfo.sizes[dim] = 1; \
topKInfo.sizes[dim] = 1; \
indicesInfo.sizes[dim] = 1; \
/* Collapse all other dims */ \
int collapseInputDim = inputInfo.collapseDims(dim); \
int collapseTopKDim = topKInfo.collapseDims(dim); \
int collapseIndicesDim = indicesInfo.collapseDims(dim); \
int64_t inputSlices = 1; \
for (int i = 0; i < inputInfo.dims; ++i) { \
inputSlices *= inputInfo.sizes[i]; \
} \
int64_t topKSlices = 1; \
for (int i = 0; i < topKInfo.dims; ++i) { \
topKSlices *= topKInfo.sizes[i]; \
} \
\
dim3 grid; \
TORCH_INTERNAL_ASSERT(getGridFromTiles(inputSlices, grid), "Too many slices to sort"); \
\
dim3 block(std::min(at::cuda::ATenCeilDiv(sliceSize, (int64_t) C10_WARP_SIZE)*(int64_t) C10_WARP_SIZE, (int64_t) 1024)); \
\
/* This is used as a template parameter to calculate indices. */ \
/* We only specialize it if all collapsed dim sizes are the */ \
/* same; otherwise, we use -1 which is the specialization */ \
/* parameter for arbitrary dimensions */ \
int allDims = inputInfo.dims; \
if (topKInfo.dims != allDims || indicesInfo.dims != allDims) { \
allDims = -1; \
} \
\
RUN_DIM(INDEX_T); \
});
// the below is safe with 0-dimensional tensors because it is based on
// TensorInfo which implicitly expands to 1-dimensional.
if (input.numel() > 0) {
// Based on required index size, run the algorithm with the
// appropriate index type
if (at::cuda::detail::canUse32BitIndexMath(input) &&
at::cuda::detail::canUse32BitIndexMath(values) &&
at::cuda::detail::canUse32BitIndexMath(indices)) {
RUN_T(uint32_t);
} else {
RUN_T(uint64_t);
}
}
#undef RUN_T
#undef RUN_DIM
#undef RUN_DIR
#undef RUN_K
// Sort the results if the user wants them sorted, since our
// selection routine does not ensure sorting
if (sorted && values.numel() > 1) {
if (should_use_small_sort(values, dim)) {
// This avoids any memory allocations and performs all sorting
// work inplace along the slice
sortKeyValueInplace(values, indices, dim, largest);
} else {
// Depend upon the backup sort that returns indices, which we
// can use in conjunction with gather to produce the original
// indices.
// This is not the most efficient implementation, especially since
// there are memory allocations performed here. If the user desires
// greater performance, they should torch.gather() the results
// themselves using the reported indices, providing previously
// allocated tensors to receive the results.
Tensor sortedIndices = at::empty_like(indices);
// FIXME: remove const_cast once sort_out cuda is ported to structured
sort_out_cuda(const_cast<Tensor&>(values), dim, largest, const_cast<Tensor&>(values), const_cast<Tensor&>(sortedIndices));
indices.copy_(indices.gather(dim, sortedIndices));
}
}
}
} // at::native
} // at
|
0fce2d69f633c08124186b1f6352562a08d8c1f5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@precisions normal z -> s d c
@author Stan Tomov
*/
#include "magmasparse_internal.h"
#define NB 64
/* =====================================================================
Matrix is m x n, and is divided into block rows, each NB x n.
Each CUDA block has NB threads to handle one block row.
Each thread handles one row, iterating across all columns.
*/
__global__ void
zcompact_kernel(
int m, int n,
magmaDoubleComplex *dA,
int ldda,
double *dnorms,
double tol,
magma_int_t *active,
magma_int_t *cBlock)
{
// dA is processed across row i (by the current thread)
int i = blockIdx.x*blockDim.x + threadIdx.x;
int cBlockSize = 0;
if ( i < m ) {
dA += i;
for(int j = 0; j<n; j++){
if (dnorms[j] > tol && active[j]){
dA[ldda*cBlockSize] = dA[ldda*j];
cBlockSize++;
}
else if (i==0)
active[j] = 0;
}
}
if (i==0)
*cBlock = cBlockSize;
}
__global__ void
zcompactactive_kernel(
int m,
int n,
magmaDoubleComplex *dA,
int ldda,
magma_int_t *active)
{
// dA is processed across row i (by the current thread)
int i = blockIdx.x*blockDim.x + threadIdx.x;
int cBlockSize = 0;
if ( i < m ) {
dA += i;
for(int j = 0; j<n; j++){
if (active[j]){
dA[ldda*cBlockSize] = dA[ldda*j];
cBlockSize++;
}
}
}
}
/* ===================================================================== */
/**
Purpose
-------
ZCOMPACT takes a set of n vectors of size m (in dA) and their norms and
compacts them into the cBlock size<=n vectors that have norms > tol.
The active mask array has 1 or 0, showing if a vector remained or not
in the compacted resulting set of vectors.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in,out]
dA COMPLEX DOUBLE PRECISION array, dimension (LDDA,N)
The m by n matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
dnorms DOUBLE PRECISION array, dimension N
The norms of the N vectors in dA
@param[in]
    tol         DOUBLE PRECISION
The tolerance value used in the criteria to compact or not.
@param[in,out]
active INTEGER array, dimension N
A mask of 1s and 0s showing if a vector remains or has been removed
@param[in,out]
cBlock magmaInt_ptr
The number of vectors that remain in dA (i.e., with norms > tol).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
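/* Hypothetical usage sketch (added annotation, not part of the original MAGMA
   source): a solver holding n residual vectors as the columns of dA might call
       magma_int_t cBlock = 0;
       magma_zcompact( m, n, dA, ldda, dnorms, tol, d_active, &cBlock, queue );
   afterwards the first cBlock columns of dA hold the vectors whose norms
   exceeded tol, and d_active[j] is set to 0 for every column that was dropped.
   The count is produced on the device (stored just past the mask at active+n)
   and read back with magma_igetvector, so cBlock is assumed here to point to
   host memory. */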
extern "C" magma_int_t
magma_zcompact(
magma_int_t m,
magma_int_t n,
magmaDoubleComplex_ptr dA,
magma_int_t ldda,
magmaDouble_ptr dnorms,
double tol,
magmaInt_ptr active,
magmaInt_ptr cBlock,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < max(1,m))
info = -4;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return info;
}
if ( m == 0 || n == 0 )
return info;
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ) );
hipLaunchKernelGGL(( zcompact_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, dA, ldda, dnorms, tol, active, active+n );
magma_igetvector( 1, active+n, 1, cBlock, 1, queue );
return info;
}
/* ===================================================================== */
/**
Purpose
-------
ZCOMPACTACTIVE takes a set of n vectors of size m (in dA) and an
    array of 1s and 0s indicating which vectors to compact (for 1s) and
which to disregard (for 0s).
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in,out]
dA COMPLEX DOUBLE PRECISION array, dimension (LDDA,N)
The m by n matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
active INTEGER array, dimension N
A mask of 1s and 0s showing if a vector remains or has been removed
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_z
********************************************************************/
extern "C" magma_int_t
magma_zcompactActive(
magma_int_t m,
magma_int_t n,
magmaDoubleComplex_ptr dA,
magma_int_t ldda,
magmaInt_ptr active,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < max(1,m))
info = -4;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return info;
}
if ( m == 0 || n == 0 )
return info;
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ) );
hipLaunchKernelGGL(( zcompactactive_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, dA, ldda, active);
return info;
}
/* ===================================================================== */
| 0fce2d69f633c08124186b1f6352562a08d8c1f5.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@precisions normal z -> s d c
@author Stan Tomov
*/
#include "magmasparse_internal.h"
#define NB 64
/* =====================================================================
Matrix is m x n, and is divided into block rows, each NB x n.
Each CUDA block has NB threads to handle one block row.
Each thread handles one row, iterating across all columns.
*/
__global__ void
zcompact_kernel(
int m, int n,
magmaDoubleComplex *dA,
int ldda,
double *dnorms,
double tol,
magma_int_t *active,
magma_int_t *cBlock)
{
// dA is processed across row i (by the current thread)
int i = blockIdx.x*blockDim.x + threadIdx.x;
int cBlockSize = 0;
if ( i < m ) {
dA += i;
for(int j = 0; j<n; j++){
if (dnorms[j] > tol && active[j]){
dA[ldda*cBlockSize] = dA[ldda*j];
cBlockSize++;
}
else if (i==0)
active[j] = 0;
}
}
if (i==0)
*cBlock = cBlockSize;
}
__global__ void
zcompactactive_kernel(
int m,
int n,
magmaDoubleComplex *dA,
int ldda,
magma_int_t *active)
{
// dA is processed across row i (by the current thread)
int i = blockIdx.x*blockDim.x + threadIdx.x;
int cBlockSize = 0;
if ( i < m ) {
dA += i;
for(int j = 0; j<n; j++){
if (active[j]){
dA[ldda*cBlockSize] = dA[ldda*j];
cBlockSize++;
}
}
}
}
/* ===================================================================== */
/**
Purpose
-------
ZCOMPACT takes a set of n vectors of size m (in dA) and their norms and
compacts them into the cBlock size<=n vectors that have norms > tol.
The active mask array has 1 or 0, showing if a vector remained or not
in the compacted resulting set of vectors.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in,out]
dA COMPLEX DOUBLE PRECISION array, dimension (LDDA,N)
The m by n matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
dnorms DOUBLE PRECISION array, dimension N
The norms of the N vectors in dA
@param[in]
    tol         DOUBLE PRECISION
The tolerance value used in the criteria to compact or not.
@param[in,out]
active INTEGER array, dimension N
A mask of 1s and 0s showing if a vector remains or has been removed
@param[in,out]
cBlock magmaInt_ptr
The number of vectors that remain in dA (i.e., with norms > tol).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zcompact(
magma_int_t m,
magma_int_t n,
magmaDoubleComplex_ptr dA,
magma_int_t ldda,
magmaDouble_ptr dnorms,
double tol,
magmaInt_ptr active,
magmaInt_ptr cBlock,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < max(1,m))
info = -4;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return info;
}
if ( m == 0 || n == 0 )
return info;
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ) );
zcompact_kernel<<< grid, threads, 0, queue->cuda_stream() >>>(
m, n, dA, ldda, dnorms, tol, active, active+n );
magma_igetvector( 1, active+n, 1, cBlock, 1, queue );
return info;
}
/* ===================================================================== */
/**
Purpose
-------
ZCOMPACTACTIVE takes a set of n vectors of size m (in dA) and an
    array of 1s and 0s indicating which vectors to compact (for 1s) and
which to disregard (for 0s).
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in,out]
dA COMPLEX DOUBLE PRECISION array, dimension (LDDA,N)
The m by n matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
active INTEGER array, dimension N
A mask of 1s and 0s showing if a vector remains or has been removed
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_z
********************************************************************/
extern "C" magma_int_t
magma_zcompactActive(
magma_int_t m,
magma_int_t n,
magmaDoubleComplex_ptr dA,
magma_int_t ldda,
magmaInt_ptr active,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < max(1,m))
info = -4;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return info;
}
if ( m == 0 || n == 0 )
return info;
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ) );
zcompactactive_kernel<<< grid, threads, 0, queue->cuda_stream() >>>(
m, n, dA, ldda, active);
return info;
}
/* ===================================================================== */
|
05ec798ccc7c38cf79494afd417c844ff0ddeb11.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <chrono>
#include "nbody.h"
#include "../configuration.h"
#ifdef OPTION_RENDER
#include "../rendering.h"
#endif // OPTION_RENDER
// Allocator handles.
AllocatorHandle<AllocatorT>* allocator_handle;
__device__ AllocatorT* device_allocator;
// Helper variable for checksum computation.
__device__ float device_checksum;
#ifdef OPTION_RENDER
// Helper variables for drawing.
__device__ int draw_counter = 0;
__device__ float Body_pos_x[kNumBodies];
__device__ float Body_pos_y[kNumBodies];
__device__ float Body_mass[kNumBodies];
#endif // OPTION_RENDER
__DEV__ Body::Body(float pos_x, float pos_y,
float vel_x, float vel_y, float mass)
: pos_x_(pos_x), pos_y_(pos_y),
vel_x_(vel_x), vel_y_(vel_y), mass_(mass) {}
__DEV__ void Body::compute_force() {
force_x_ = 0.0f;
force_y_ = 0.0f;
device_allocator->template device_do<Body>(&Body::apply_force, this);
}
__DEV__ void Body::apply_force(Body* other) {
// Update `other`.
if (other != this) {
float dx = pos_x_ - other->pos_x_;
float dy = pos_y_ - other->pos_y_;
float dist = sqrt(dx*dx + dy*dy);
float F = kGravityConstant * mass_ * other->mass_
/ (dist * dist + kDampeningFactor);
other->force_x_ += F*dx / dist;
other->force_y_ += F*dy / dist;
}
}
__DEV__ void Body::update() {
vel_x_ += force_x_*kDt / mass_;
vel_y_ += force_y_*kDt / mass_;
pos_x_ += vel_x_*kDt;
pos_y_ += vel_y_*kDt;
if (pos_x_ < -1 || pos_x_ > 1) {
vel_x_ = -vel_x_;
}
if (pos_y_ < -1 || pos_y_ > 1) {
vel_y_ = -vel_y_;
}
}
__DEV__ void Body::add_checksum() {
atomicAdd(&device_checksum, pos_x_ + pos_y_*2 + vel_x_*3 + vel_y_*4);
}
__global__ void kernel_compute_checksum() {
device_checksum = 0.0f;
device_allocator->device_do<Body>(&Body::add_checksum);
}
__global__ void kernel_initialize_bodies() {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
hiprandState_t rand_state;
hiprand_init(kSeed, tid, 0, &rand_state);
for (int i = tid; i < kNumBodies; i += blockDim.x * gridDim.x) {
new(device_allocator) Body(
/*pos_x=*/ 2 * hiprand_uniform(&rand_state) - 1,
/*pos_y=*/ 2 * hiprand_uniform(&rand_state) - 1,
/*vel_x=*/ (hiprand_uniform(&rand_state) - 0.5) / 1000,
/*vel_y=*/ (hiprand_uniform(&rand_state) - 0.5) / 1000,
/*mass=*/ (hiprand_uniform(&rand_state)/2 + 0.5) * kMaxMass);
}
}
#ifdef OPTION_RENDER
__DEV__ void Body::add_to_draw_array() {
int idx = atomicAdd(&draw_counter, 1);
Body_pos_x[idx] = pos_x_;
Body_pos_y[idx] = pos_y_;
Body_mass[idx] = mass_;
}
__global__ void kernel_reset_draw_counters() {
draw_counter = 0;
}
void render_frame() {
// Host-side variables for rendering.
float host_Body_pos_x[kNumBodies];
float host_Body_pos_y[kNumBodies];
float host_Body_mass[kNumBodies];
hipLaunchKernelGGL(( kernel_reset_draw_counters), dim3(1), dim3(1), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
allocator_handle->parallel_do<Body, &Body::add_to_draw_array>();
gpuErrchk(hipDeviceSynchronize());
hipMemcpyFromSymbol(host_Body_pos_x, Body_pos_x,
sizeof(float)*kNumBodies, 0, hipMemcpyDeviceToHost);
hipMemcpyFromSymbol(host_Body_pos_y, Body_pos_y, sizeof(float)*kNumBodies, 0,
hipMemcpyDeviceToHost);
hipMemcpyFromSymbol(host_Body_mass, Body_mass, sizeof(float)*kNumBodies, 0,
hipMemcpyDeviceToHost);
draw(host_Body_pos_x, host_Body_pos_y, host_Body_mass);
}
#endif // OPTION_RENDER
int main(int /*argc*/, char** /*argv*/) {
#ifdef OPTION_RENDER
init_renderer();
#endif // OPTION_RENDER
// Create new allocator.
allocator_handle = new AllocatorHandle<AllocatorT>();
AllocatorT* dev_ptr = allocator_handle->device_pointer();
hipMemcpyToSymbol(device_allocator, &dev_ptr, sizeof(AllocatorT*), 0,
hipMemcpyHostToDevice);
auto time_start = std::chrono::system_clock::now();
hipLaunchKernelGGL(( kernel_initialize_bodies), dim3(128), dim3(128), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
for (int i = 0; i < kNumIterations; ++i) {
#ifndef NDEBUG
// Print debug information.
allocator_handle->DBG_print_state_stats();
#endif // NDEBUG
allocator_handle->parallel_do<Body, &Body::compute_force>();
allocator_handle->parallel_do<Body, &Body::update>();
#ifdef OPTION_RENDER
render_frame();
#endif // OPTION_RENDER
}
auto time_end = std::chrono::system_clock::now();
auto elapsed = time_end - time_start;
auto micros = std::chrono::duration_cast<std::chrono::microseconds>(elapsed)
.count();
printf("%lu, %lu\n", micros, allocator_handle->DBG_get_enumeration_time());
#ifndef NDEBUG
hipLaunchKernelGGL(( kernel_compute_checksum), dim3(1), dim3(1), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
float checksum;
hipMemcpyFromSymbol(&checksum, device_checksum, sizeof(device_checksum), 0,
hipMemcpyDeviceToHost);
printf("Checksum: %f\n", checksum);
#endif // NDEBUG
#ifdef OPTION_RENDER
close_renderer();
#endif // OPTION_RENDER
return 0;
}
| 05ec798ccc7c38cf79494afd417c844ff0ddeb11.cu | #include <chrono>
#include "nbody.h"
#include "../configuration.h"
#ifdef OPTION_RENDER
#include "../rendering.h"
#endif // OPTION_RENDER
// Allocator handles.
AllocatorHandle<AllocatorT>* allocator_handle;
__device__ AllocatorT* device_allocator;
// Helper variable for checksum computation.
__device__ float device_checksum;
#ifdef OPTION_RENDER
// Helper variables for drawing.
__device__ int draw_counter = 0;
__device__ float Body_pos_x[kNumBodies];
__device__ float Body_pos_y[kNumBodies];
__device__ float Body_mass[kNumBodies];
#endif // OPTION_RENDER
__DEV__ Body::Body(float pos_x, float pos_y,
float vel_x, float vel_y, float mass)
: pos_x_(pos_x), pos_y_(pos_y),
vel_x_(vel_x), vel_y_(vel_y), mass_(mass) {}
__DEV__ void Body::compute_force() {
force_x_ = 0.0f;
force_y_ = 0.0f;
device_allocator->template device_do<Body>(&Body::apply_force, this);
}
__DEV__ void Body::apply_force(Body* other) {
// Update `other`.
if (other != this) {
float dx = pos_x_ - other->pos_x_;
float dy = pos_y_ - other->pos_y_;
float dist = sqrt(dx*dx + dy*dy);
float F = kGravityConstant * mass_ * other->mass_
/ (dist * dist + kDampeningFactor);
other->force_x_ += F*dx / dist;
other->force_y_ += F*dy / dist;
}
}
__DEV__ void Body::update() {
vel_x_ += force_x_*kDt / mass_;
vel_y_ += force_y_*kDt / mass_;
pos_x_ += vel_x_*kDt;
pos_y_ += vel_y_*kDt;
if (pos_x_ < -1 || pos_x_ > 1) {
vel_x_ = -vel_x_;
}
if (pos_y_ < -1 || pos_y_ > 1) {
vel_y_ = -vel_y_;
}
}
__DEV__ void Body::add_checksum() {
atomicAdd(&device_checksum, pos_x_ + pos_y_*2 + vel_x_*3 + vel_y_*4);
}
__global__ void kernel_compute_checksum() {
device_checksum = 0.0f;
device_allocator->device_do<Body>(&Body::add_checksum);
}
__global__ void kernel_initialize_bodies() {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
curandState rand_state;
curand_init(kSeed, tid, 0, &rand_state);
for (int i = tid; i < kNumBodies; i += blockDim.x * gridDim.x) {
new(device_allocator) Body(
/*pos_x=*/ 2 * curand_uniform(&rand_state) - 1,
/*pos_y=*/ 2 * curand_uniform(&rand_state) - 1,
/*vel_x=*/ (curand_uniform(&rand_state) - 0.5) / 1000,
/*vel_y=*/ (curand_uniform(&rand_state) - 0.5) / 1000,
/*mass=*/ (curand_uniform(&rand_state)/2 + 0.5) * kMaxMass);
}
}
#ifdef OPTION_RENDER
__DEV__ void Body::add_to_draw_array() {
int idx = atomicAdd(&draw_counter, 1);
Body_pos_x[idx] = pos_x_;
Body_pos_y[idx] = pos_y_;
Body_mass[idx] = mass_;
}
__global__ void kernel_reset_draw_counters() {
draw_counter = 0;
}
void render_frame() {
// Host-side variables for rendering.
float host_Body_pos_x[kNumBodies];
float host_Body_pos_y[kNumBodies];
float host_Body_mass[kNumBodies];
kernel_reset_draw_counters<<<1, 1>>>();
gpuErrchk(cudaDeviceSynchronize());
allocator_handle->parallel_do<Body, &Body::add_to_draw_array>();
gpuErrchk(cudaDeviceSynchronize());
cudaMemcpyFromSymbol(host_Body_pos_x, Body_pos_x,
sizeof(float)*kNumBodies, 0, cudaMemcpyDeviceToHost);
cudaMemcpyFromSymbol(host_Body_pos_y, Body_pos_y, sizeof(float)*kNumBodies, 0,
cudaMemcpyDeviceToHost);
cudaMemcpyFromSymbol(host_Body_mass, Body_mass, sizeof(float)*kNumBodies, 0,
cudaMemcpyDeviceToHost);
draw(host_Body_pos_x, host_Body_pos_y, host_Body_mass);
}
#endif // OPTION_RENDER
int main(int /*argc*/, char** /*argv*/) {
#ifdef OPTION_RENDER
init_renderer();
#endif // OPTION_RENDER
// Create new allocator.
allocator_handle = new AllocatorHandle<AllocatorT>();
AllocatorT* dev_ptr = allocator_handle->device_pointer();
cudaMemcpyToSymbol(device_allocator, &dev_ptr, sizeof(AllocatorT*), 0,
cudaMemcpyHostToDevice);
auto time_start = std::chrono::system_clock::now();
kernel_initialize_bodies<<<128, 128>>>();
gpuErrchk(cudaDeviceSynchronize());
for (int i = 0; i < kNumIterations; ++i) {
#ifndef NDEBUG
// Print debug information.
allocator_handle->DBG_print_state_stats();
#endif // NDEBUG
allocator_handle->parallel_do<Body, &Body::compute_force>();
allocator_handle->parallel_do<Body, &Body::update>();
#ifdef OPTION_RENDER
render_frame();
#endif // OPTION_RENDER
}
auto time_end = std::chrono::system_clock::now();
auto elapsed = time_end - time_start;
auto micros = std::chrono::duration_cast<std::chrono::microseconds>(elapsed)
.count();
printf("%lu, %lu\n", micros, allocator_handle->DBG_get_enumeration_time());
#ifndef NDEBUG
kernel_compute_checksum<<<1, 1>>>();
gpuErrchk(cudaDeviceSynchronize());
float checksum;
cudaMemcpyFromSymbol(&checksum, device_checksum, sizeof(device_checksum), 0,
cudaMemcpyDeviceToHost);
printf("Checksum: %f\n", checksum);
#endif // NDEBUG
#ifdef OPTION_RENDER
close_renderer();
#endif // OPTION_RENDER
return 0;
}
|
3ebad20883afdc9b23d049a34e2b1106b2d106b4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/****************************************************************************\
* --- Practical Course: GPU Programming in Computer Vision ---
*
* time: winter term 2012/13 / March 11-18, 2013
*
* project: superresolution
* file: superresolution.cu
*
*
* implement all functions with ### implement me ### in the function body
\****************************************************************************/
/*
* superresolution.cu
*
* Created on: May 16, 2012
* Author: steinbrf
*/
#include "superresolution.cuh"
#include <stdio.h>
//#include <cutil.h>
//#include <cutil_inline.h>
#include <auxiliary/cuda_basic.cuh>
#include <vector>
#include <list>
#include <sys/time.h>
timeval startFirstGauss, endFirstGauss;
#include <sstream>
#include <string.h>
//#include <linearoperations.cuh>
#include <linearoperations/linearoperations.cuh>
#include "superresolution_definitions.h"
#include <auxiliary/debug.hpp>
#ifdef DGT400
#define SR_BW 32
#define SR_BH 16
#else
#define SR_BW 16
#define SR_BH 16
#endif
//shared mem flags
#define SHARED_MEM 0
#define BACKWARDSWARPING_VALUE_TEXTURE_MEM 1
#define GAUSS_MEMORY 0 // 0 = global memory, 1 = shared memory, 2 = texture memory
#include <linearoperations/linearoperations.h>
extern __shared__ float smem[];
// kernel to compute the difference image
// factor_clipping acts as lower and upper limit
__global__ void dualL1Difference
(
const float *primal,
const float *constant,
float *dual,
int nx,
int ny,
int pitch,
float factor_update,
float factor_clipping,
float huber_denom,
float tau_d
)
{
const int x = threadIdx.x + blockDim.x * blockIdx.x;
const int y = threadIdx.y + blockDim.y * blockIdx.y;
if( x < nx && y < ny ) // guards
{
int idx = x + pitch * y;
float dualTemp = (dual[idx] + tau_d * factor_update * (primal[idx] - constant[idx])) / huber_denom;
if( dualTemp < -factor_clipping)
{
dual[idx] = -factor_clipping;
}
else if( dualTemp > factor_clipping)
{
dual[idx] = factor_clipping;
}
else
{
dual[idx] = dualTemp;
}
}
}
//global memory version of primal1N
__global__ void primal1N_gm
(
const float *xi1,
const float *xi2,
const float *degraded,
float *u,
float *uor,
int nx,
int ny,
int pitch,
float factor_tv_update,
float factor_degrade_update,
float tau_p,
float overrelaxation
)
{
const int x = threadIdx.x + blockDim.x * blockIdx.x;
const int y = threadIdx.y + blockDim.y * blockIdx.y;
if( x < nx && y < ny )
{
const int idx = y * pitch + x;
float u_old = u[idx];
float u_new = u_old + tau_p *
(
factor_tv_update *
(xi1[idx] - ( x == 0 ? 0.0f : xi1[idx - 1] ) + xi2[idx] - ( y == 0 ? 0.0f : xi2[idx - pitch] )) -
factor_degrade_update * degraded[idx]
);
// write back to output image
u[idx] = u_new;
uor[idx] = overrelaxation * u_new + (1.0f - overrelaxation) * u_old;
}
}
__global__ void primal1N_sm
(
const float *xi1,
const float *xi2,
const float *degraded,
float *u,
float *uor,
int nx,
int ny,
int pitch,
float factor_tv_update,
float factor_degrade_update,
float tau_p,
float overrelaxation
)
{
const int x = threadIdx.x + blockDim.x * blockIdx.x;
const int y = threadIdx.y + blockDim.y * blockIdx.y;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
int idx = y * pitch + x;
__shared__ float s_xi1[SR_BW + 1][SR_BH];
__shared__ float s_xi2[SR_BW][SR_BH + 1];
// loading data to shared memory
if (x < nx && y < ny)
{
s_xi1[tx+1][ty] = xi1[idx];
s_xi2[tx][ty+1] = xi2[idx];
if( x == 0 )
{
s_xi1[0][ty] = 0.0f;
}
else if( threadIdx.x == 0)
{
s_xi1[0][ty] = xi1[idx-1];
}
if( y == 0 )
{
s_xi2[tx][0] = 0.0f;
}
else if( threadIdx.y == 0 )
{
s_xi2[tx][0] = xi2[idx-pitch];
}
}
__syncthreads();
if (x < nx && y < ny)
{
float u_old = u[idx];
// change of indices for xi1 & xi2 due to the way shared memory copying is done !
// produces correct results
float u_new = u_old + tau_p * ( factor_tv_update *
( s_xi1[tx + 1][ty] - s_xi1[tx][ty] + s_xi2[tx][ty + 1] - s_xi2[tx][ty] ) -
factor_degrade_update * degraded[idx] );
// write back to output image
u[idx] = u_new;
uor[idx] = overrelaxation * u_new + (1.0f - overrelaxation) * u_old;
}
}
// global memory version of dualTVHuber
__global__ void dualTVHuber_gm
(
float *uor_g, // Field of overrelaxed primal variables
float *xi1_g, // Dual Variable for TV regularization in X direction
float *xi2_g, // Dual Variable for TV regularization in Y direction
int nx, // New High-Resolution Width
int ny, // New High-Resolution Height
int pitchf1, // GPU pitch (padded width) of the superresolution high-res fields
float factor_update,
float factor_clipping,
float huber_denom,
float tau_d
)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if( x < nx && y < ny ) // guards
{
int x1 = x + 1;
if( x1 >= nx ){ x1 = nx-1; } // at x boundary
int y1 = y+1;
if( y1 >= ny ){ y1 = ny-1; } // at y boundary
// do xi1_g, xi2_g & uor_g have same pitch ? confirm - YES
const int p = y * pitchf1 + x;
float dx = (xi1_g[p] + tau_d * factor_update * (uor_g[y * pitchf1 + x1] - uor_g[p])) / huber_denom;
float dy = (xi2_g[p] + tau_d * factor_update * (uor_g[y1 * pitchf1 + x] - uor_g[p])) / huber_denom;
float denom = sqrtf( dx * dx + dy * dy ) / factor_clipping;
if( denom < 1.0f )
denom = 1.0f;
xi1_g[p] = dx / denom;
xi2_g[p] = dy / denom;
}
}
// shared memory version of dualTVHuber
__global__ void dualTVHuber_sm
(
float *uor_g, // Field of overrelaxed primal variables
float *xi1_g, // Dual Variable for TV regularization in X direction
float *xi2_g, // Dual Variable for TV regularization in Y direction
int nx, // New High-Resolution Width
int ny, // New High-Resolution Height
int pitchf1, // GPU pitch (padded width) of the superresolution high-res fields
float factor_update,
float factor_clipping,
float huber_denom,
float tau_d
)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
__shared__ float uor[SR_BW+1][SR_BH+1];
int idx = y * pitchf1 + x;
// load data into shared memory
// NOTE: not using shared memory for xi1 & xi2 reduces execution time from
// 420 to 403 micro sec on 48 core GPU
if( x < nx && y < ny ) // guards
{
uor[tx][ty] = uor_g[idx];
if( x == nx -1 )
{
uor[tx+1][ty] = uor[tx][ty];
}
else if( threadIdx.x == SR_BW - 1 )
{
uor[tx+1][ty] = uor_g[idx+1];
}
if( y == ny -1 )
{
uor[tx][ty+1] = uor[tx][ty];
}
else if( threadIdx.y == SR_BH -1 )
{
uor[tx][ty+1] = uor_g[idx+pitchf1];
}
}
__syncthreads();
if(x < nx && y < ny)// guards
{
// compute
float dx = (xi1_g[idx] + tau_d * factor_update * (uor[tx+1][ty] - uor[tx][ty])) / huber_denom;
float dy = (xi2_g[idx] + tau_d * factor_update * (uor[tx][ty+1] - uor[tx][ty])) / huber_denom;
float denom = sqrtf( dx * dx + dy * dy ) / factor_clipping;
if(denom < 1.0f) denom = 1.0f;
xi1_g[idx] = dx / denom;
xi2_g[idx] = dy / denom;
}
}
void computeSuperresolutionUngerGPU
(
float *xi1_g, // Dual Variable for TV regularization in X direction
float *xi2_g, // Dual Variable for TV regularization in Y direction
float *temp1_g, // Helper array
float *temp2_g,
float *temp3_g,
float *temp4_g,
float *uor_g, // Field of overrelaxed primal variables
float *u_g, // GPU memory for the result image
std::vector<float*> &q_g, // Dual variables for L1 difference penalization
std::vector<float*> &images_g, // Input images in original resolution
std::list<FlowGPU> &flowsGPU, // GPU memory for the displacement fields
// class FlowGPU { void clear(); float *u_g; float *v_g; int nx; int ny; }
int &nx, // New High-Resolution Width
int &ny, // New High-Resolution Height
int &pitchf1, // GPU pitch (padded width) of the superresolution high-res fields
int &nx_orig, // Original Low-Resolution Width
int &ny_orig, // Original Low-Resolution Height
int &pitchf1_orig, // GPU pitch (padded width) of the original low-res images
int &oi, // Number of Iterations
float &tau_p, // Primal Update Step Size
float &tau_d, // Dual Update Step Size
float &factor_tv, // The weight of Total Variation Penalization
float &huber_epsilon, // Parameter for Huber norm regularization
float &factor_rescale_x, // High-Resolution Width divided by Low-Resolution Width
float &factor_rescale_y, // High-Resolution Height divided by Low-Resolution Height
float &blur, // The amount of Gaussian Blur present in the degrading process
float &overrelaxation, // Overrelaxation parameter in the range of [1,2]
int debug // Debug Flag, if activated the class produces Debug output.
)
{
// grid and block dimensions
int gridsize_x = ((nx - 1) / SR_BW) + 1;
int gridsize_y = ((ny - 1) / SR_BH) + 1;
dim3 dimGrid ( gridsize_x, gridsize_y );
dim3 dimBlock ( SR_BW, SR_BH );
// initialise xi1_g and xi2_g to zero
hipLaunchKernelGGL(( setKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, xi1_g, nx, ny, pitchf1, 0.0f );
hipLaunchKernelGGL(( setKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, xi2_g, nx, ny, pitchf1, 0.0f );
	// initialise u_g and uor_g to 64.0f (final output and overrelaxed superresolution image)
hipLaunchKernelGGL(( setKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, u_g, nx, ny, pitchf1, 64.0f );
hipLaunchKernelGGL(( setKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, uor_g, nx, ny, pitchf1, 64.0f );
// initialise all elements of q_g to zero (difference images)
for(unsigned int k = 0; k < q_g.size(); ++k )
{
hipLaunchKernelGGL(( setKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, q_g[k], nx_orig, ny_orig, pitchf1_orig, 0.0f );
}
float factorquad = factor_rescale_x * factor_rescale_x * factor_rescale_y * factor_rescale_y;
float factor_degrade_update = pow( factorquad, CLIPPING_TRADEOFF_DEGRADE_1N );
float factor_degrade_clipping = factorquad / factor_degrade_update;
float huber_denom_degrade = 1.0f + huber_epsilon * tau_d / factor_degrade_clipping;
float factor_tv_update = pow( factor_tv, CLIPPING_TRADEOFF_TV );
float factor_tv_clipping = factor_tv / factor_tv_update;
float huber_denom_tv = 1.0f + huber_epsilon * tau_d / factor_tv;
// outer iterations for convergence
for( int i = 0; i < oi; ++i )
{
// calculate dual tv
#if SHARED_MEM
hipLaunchKernelGGL(( dualTVHuber_sm), dim3(dimGrid),dim3(dimBlock), 0, 0,
uor_g, xi1_g, xi2_g, nx, ny, pitchf1, factor_tv_update, factor_tv_clipping, huber_denom_tv, tau_d );
#else
hipLaunchKernelGGL(( dualTVHuber_gm), dim3(dimGrid),dim3(dimBlock), 0, 0,
uor_g, xi1_g, xi2_g, nx, ny, pitchf1, factor_tv_update, factor_tv_clipping, huber_denom_tv, tau_d );
#endif
// DUAL DATA
// iterating over all images
std::vector<float*>::iterator image = images_g.begin();
std::list<FlowGPU>::iterator flow = flowsGPU.begin();
for( unsigned int k = 0; image != images_g.end() && flow != flowsGPU.end() && k < q_g.size(); ++k, ++flow, ++image )
{
// backward warping of upsampled input image using flow
#if BACKWARDSWARPING_VALUE_TEXTURE_MEM
backwardRegistrationBilinearValueTex ( uor_g, flow->u_g, flow->v_g, temp1_g, 0.0f, nx, ny, pitchf1, pitchf1, 1.0f, 1.0f );
#else
backwardRegistrationBilinearValueTex_gm ( uor_g, flow->u_g, flow->v_g, temp1_g, 0.0f, nx, ny, pitchf1, pitchf1, 1.0f, 1.0f );
#endif
if( blur > 0.0f )
{
// blur warped input image
#if GAUSS_MEMORY == 2
// gauss with texture memory
gaussBlurSeparateMirrorGpu ( temp1_g, temp2_g, nx, ny, pitchf1, blur, blur, (int)(3.0f * blur), temp4_g, 0 );
#elif GAUSS_MEMORY == 1
// gauss with shared memory
gaussBlurSeparateMirrorGpu_sm ( temp1_g, temp2_g, nx, ny, pitchf1, blur, blur, (int)(3.0f * blur), temp4_g, 0 );
#else
// gauss with global memory
gaussBlurSeparateMirrorGpu_gm ( temp1_g, temp2_g, nx, ny, pitchf1, blur, blur, (int)(3.0f * blur), temp4_g, 0 );
#endif
}
else
{
// swap the helper array pointers, if not blurred
float *temp = temp1_g;
temp1_g = temp2_g;
temp2_g = temp;
}
if( factor_rescale_x > 1.0f || factor_rescale_y > 1.0f )
{
// downsampling of blurred and warped image
resampleAreaParallelSeparate(
temp2_g, // input image
temp1_g, // output image
nx, ny, // input size
pitchf1, // input pitch
nx_orig, ny_orig, // output size
pitchf1_orig, // output pitch
temp4_g // helper array
);
}
else
{
// swap the helper array pointers
float *temp = temp1_g;
temp1_g = temp2_g;
temp2_g = temp;
}
// compute difference between warped and downsampled input image
// and current small reference image (the one we want to compute the superresolution of)
hipLaunchKernelGGL(( dualL1Difference), dim3(dimGrid), dim3(dimBlock), 0, 0,
temp1_g, *image, q_g[k], nx_orig, ny_orig, pitchf1_orig,
factor_degrade_update, factor_degrade_clipping, huber_denom_degrade, tau_d);
}
// reset 3rd helper array to zero
hipLaunchKernelGGL(( setKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, temp3_g, nx, ny, pitchf1, 0.0f );
// iterating over all images
image = images_g.begin();
flow = flowsGPU.begin();
for( unsigned int k = 0; image != images_g.end() && flow != flowsGPU.end() && k < q_g.size(); ++k, ++flow, ++image )
{
if( factor_rescale_x > 1.0f || factor_rescale_y > 1.0f )
{
// upsample difference images
resampleAreaParallelSeparateAdjoined( q_g[k], temp1_g, nx_orig, ny_orig, pitchf1_orig, nx, ny, pitchf1, temp4_g );
}
else
{
// copy q_g[k] to temp1_g
hipMemcpy( temp1_g, q_g[k], ny * pitchf1, hipMemcpyDeviceToDevice );
}
if( blur > 0.0f )
{
// blur upsampled difference image
#if GAUSS_MEMORY == 2
// gauss with texture memory
gaussBlurSeparateMirrorGpu ( temp1_g, temp2_g, nx, ny, pitchf1, blur, blur, (int)(3.0f * blur), temp4_g, 0 );
#elif GAUSS_MEMORY == 1
// gauss with shared memory
gaussBlurSeparateMirrorGpu_sm ( temp1_g, temp2_g, nx, ny, pitchf1, blur, blur, (int)(3.0f * blur), temp4_g, 0 );
#else
// gauss with global memory
gaussBlurSeparateMirrorGpu_gm ( temp1_g, temp2_g, nx, ny, pitchf1, blur, blur, (int)(3.0f * blur), temp4_g, 0 );
#endif
}
else
{
// swap the helper array pointers
float *temp = temp1_g;
temp1_g = temp2_g;
temp2_g = temp;
}
// foreward warping of the difference image
forewardRegistrationBilinearAtomic (
flow->u_g, flow->v_g,
temp2_g, temp1_g,
nx, ny,
pitchf1
);
// sum all difference images up
hipLaunchKernelGGL(( addKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, temp1_g, temp3_g, nx, ny, pitchf1 );
}
#if SHARED_MEM
hipLaunchKernelGGL(( primal1N_sm), dim3(dimGrid), dim3(dimBlock), 0, 0, xi1_g, xi2_g, temp3_g, u_g, uor_g,
nx, ny, pitchf1, factor_tv_update, factor_degrade_update, tau_p, overrelaxation);
#else
hipLaunchKernelGGL(( primal1N_gm), dim3(dimGrid), dim3(dimBlock), 0, 0, xi1_g, xi2_g, temp3_g, u_g, uor_g,
nx, ny, pitchf1, factor_tv_update, factor_degrade_update, tau_p, overrelaxation);
#endif
}
}
| 3ebad20883afdc9b23d049a34e2b1106b2d106b4.cu | /****************************************************************************\
* --- Practical Course: GPU Programming in Computer Vision ---
*
* time: winter term 2012/13 / March 11-18, 2013
*
* project: superresolution
* file: superresolution.cu
*
*
* implement all functions with ### implement me ### in the function body
\****************************************************************************/
/*
* superresolution.cu
*
* Created on: May 16, 2012
* Author: steinbrf
*/
#include "superresolution.cuh"
#include <stdio.h>
//#include <cutil.h>
//#include <cutil_inline.h>
#include <auxiliary/cuda_basic.cuh>
#include <vector>
#include <list>
#include <sys/time.h>
timeval startFirstGauss, endFirstGauss;
#include <sstream>
#include <string.h>
//#include <linearoperations.cuh>
#include <linearoperations/linearoperations.cuh>
#include "superresolution_definitions.h"
#include <auxiliary/debug.hpp>
#ifdef DGT400
#define SR_BW 32
#define SR_BH 16
#else
#define SR_BW 16
#define SR_BH 16
#endif
//shared mem flags
#define SHARED_MEM 0
#define BACKWARDSWARPING_VALUE_TEXTURE_MEM 1
#define GAUSS_MEMORY 0 // 0 = global memory, 1 = shared memory, 2 = texture memory
#include <linearoperations/linearoperations.h>
extern __shared__ float smem[];
// kernel to compute the difference image
// factor_clipping acts as lower and upper limit
__global__ void dualL1Difference
(
const float *primal,
const float *constant,
float *dual,
int nx,
int ny,
int pitch,
float factor_update,
float factor_clipping,
float huber_denom,
float tau_d
)
{
const int x = threadIdx.x + blockDim.x * blockIdx.x;
const int y = threadIdx.y + blockDim.y * blockIdx.y;
if( x < nx && y < ny ) // guards
{
int idx = x + pitch * y;
float dualTemp = (dual[idx] + tau_d * factor_update * (primal[idx] - constant[idx])) / huber_denom;
if( dualTemp < -factor_clipping)
{
dual[idx] = -factor_clipping;
}
else if( dualTemp > factor_clipping)
{
dual[idx] = factor_clipping;
}
else
{
dual[idx] = dualTemp;
}
}
}
//global memory version of primal1N
__global__ void primal1N_gm
(
const float *xi1,
const float *xi2,
const float *degraded,
float *u,
float *uor,
int nx,
int ny,
int pitch,
float factor_tv_update,
float factor_degrade_update,
float tau_p,
float overrelaxation
)
{
const int x = threadIdx.x + blockDim.x * blockIdx.x;
const int y = threadIdx.y + blockDim.y * blockIdx.y;
if( x < nx && y < ny )
{
const int idx = y * pitch + x;
float u_old = u[idx];
float u_new = u_old + tau_p *
(
factor_tv_update *
(xi1[idx] - ( x == 0 ? 0.0f : xi1[idx - 1] ) + xi2[idx] - ( y == 0 ? 0.0f : xi2[idx - pitch] )) -
factor_degrade_update * degraded[idx]
);
// write back to output image
u[idx] = u_new;
uor[idx] = overrelaxation * u_new + (1.0f - overrelaxation) * u_old;
}
}
__global__ void primal1N_sm
(
const float *xi1,
const float *xi2,
const float *degraded,
float *u,
float *uor,
int nx,
int ny,
int pitch,
float factor_tv_update,
float factor_degrade_update,
float tau_p,
float overrelaxation
)
{
const int x = threadIdx.x + blockDim.x * blockIdx.x;
const int y = threadIdx.y + blockDim.y * blockIdx.y;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
int idx = y * pitch + x;
__shared__ float s_xi1[SR_BW + 1][SR_BH];
__shared__ float s_xi2[SR_BW][SR_BH + 1];
// loading data to shared memory
if (x < nx && y < ny)
{
s_xi1[tx+1][ty] = xi1[idx];
s_xi2[tx][ty+1] = xi2[idx];
if( x == 0 )
{
s_xi1[0][ty] = 0.0f;
}
else if( threadIdx.x == 0)
{
s_xi1[0][ty] = xi1[idx-1];
}
if( y == 0 )
{
s_xi2[tx][0] = 0.0f;
}
else if( threadIdx.y == 0 )
{
s_xi2[tx][0] = xi2[idx-pitch];
}
}
__syncthreads();
if (x < nx && y < ny)
{
float u_old = u[idx];
// change of indices for xi1 & xi2 due to the way shared memory copying is done !
// produces correct results
float u_new = u_old + tau_p * ( factor_tv_update *
( s_xi1[tx + 1][ty] - s_xi1[tx][ty] + s_xi2[tx][ty + 1] - s_xi2[tx][ty] ) -
factor_degrade_update * degraded[idx] );
// write back to output image
u[idx] = u_new;
uor[idx] = overrelaxation * u_new + (1.0f - overrelaxation) * u_old;
}
}
// global memory version of dualTVHuber
__global__ void dualTVHuber_gm
(
float *uor_g, // Field of overrelaxed primal variables
float *xi1_g, // Dual Variable for TV regularization in X direction
float *xi2_g, // Dual Variable for TV regularization in Y direction
int nx, // New High-Resolution Width
int ny, // New High-Resolution Height
int pitchf1, // GPU pitch (padded width) of the superresolution high-res fields
float factor_update,
float factor_clipping,
float huber_denom,
float tau_d
)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if( x < nx && y < ny ) // guards
{
int x1 = x + 1;
if( x1 >= nx ){ x1 = nx-1; } // at x boundary
int y1 = y+1;
if( y1 >= ny ){ y1 = ny-1; } // at y boundary
// do xi1_g, xi2_g & uor_g have same pitch ? confirm - YES
const int p = y * pitchf1 + x;
float dx = (xi1_g[p] + tau_d * factor_update * (uor_g[y * pitchf1 + x1] - uor_g[p])) / huber_denom;
float dy = (xi2_g[p] + tau_d * factor_update * (uor_g[y1 * pitchf1 + x] - uor_g[p])) / huber_denom;
float denom = sqrtf( dx * dx + dy * dy ) / factor_clipping;
if( denom < 1.0f )
denom = 1.0f;
xi1_g[p] = dx / denom;
xi2_g[p] = dy / denom;
}
}
// shared memory version of dualTVHuber
__global__ void dualTVHuber_sm
(
float *uor_g, // Field of overrelaxed primal variables
float *xi1_g, // Dual Variable for TV regularization in X direction
float *xi2_g, // Dual Variable for TV regularization in Y direction
int nx, // New High-Resolution Width
int ny, // New High-Resolution Height
int pitchf1, // GPU pitch (padded width) of the superresolution high-res fields
float factor_update,
float factor_clipping,
float huber_denom,
float tau_d
)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
__shared__ float uor[SR_BW+1][SR_BH+1];
int idx = y * pitchf1 + x;
// load data into shared memory
// NOTE: not using shared memory for xi1 & xi2 reduces execution time from
// 420 to 403 micro sec on 48 core GPU
if( x < nx && y < ny ) // guards
{
uor[tx][ty] = uor_g[idx];
if( x == nx -1 )
{
uor[tx+1][ty] = uor[tx][ty];
}
else if( threadIdx.x == SR_BW - 1 )
{
uor[tx+1][ty] = uor_g[idx+1];
}
if( y == ny -1 )
{
uor[tx][ty+1] = uor[tx][ty];
}
else if( threadIdx.y == SR_BH -1 )
{
uor[tx][ty+1] = uor_g[idx+pitchf1];
}
}
__syncthreads();
if(x < nx && y < ny)// guards
{
// compute
float dx = (xi1_g[idx] + tau_d * factor_update * (uor[tx+1][ty] - uor[tx][ty])) / huber_denom;
float dy = (xi2_g[idx] + tau_d * factor_update * (uor[tx][ty+1] - uor[tx][ty])) / huber_denom;
float denom = sqrtf( dx * dx + dy * dy ) / factor_clipping;
if(denom < 1.0f) denom = 1.0f;
xi1_g[idx] = dx / denom;
xi2_g[idx] = dy / denom;
}
}
void computeSuperresolutionUngerGPU
(
float *xi1_g, // Dual Variable for TV regularization in X direction
float *xi2_g, // Dual Variable for TV regularization in Y direction
float *temp1_g, // Helper array
float *temp2_g,
float *temp3_g,
float *temp4_g,
float *uor_g, // Field of overrelaxed primal variables
float *u_g, // GPU memory for the result image
std::vector<float*> &q_g, // Dual variables for L1 difference penalization
std::vector<float*> &images_g, // Input images in original resolution
std::list<FlowGPU> &flowsGPU, // GPU memory for the displacement fields
// class FlowGPU { void clear(); float *u_g; float *v_g; int nx; int ny; }
int &nx, // New High-Resolution Width
int &ny, // New High-Resolution Height
int &pitchf1, // GPU pitch (padded width) of the superresolution high-res fields
int &nx_orig, // Original Low-Resolution Width
int &ny_orig, // Original Low-Resolution Height
int &pitchf1_orig, // GPU pitch (padded width) of the original low-res images
int &oi, // Number of Iterations
float &tau_p, // Primal Update Step Size
float &tau_d, // Dual Update Step Size
float &factor_tv, // The weight of Total Variation Penalization
float &huber_epsilon, // Parameter for Huber norm regularization
float &factor_rescale_x, // High-Resolution Width divided by Low-Resolution Width
float &factor_rescale_y, // High-Resolution Height divided by Low-Resolution Height
float &blur, // The amount of Gaussian Blur present in the degrading process
float &overrelaxation, // Overrelaxation parameter in the range of [1,2]
int debug // Debug Flag, if activated the class produces Debug output.
)
{
// grid and block dimensions
int gridsize_x = ((nx - 1) / SR_BW) + 1;
int gridsize_y = ((ny - 1) / SR_BH) + 1;
dim3 dimGrid ( gridsize_x, gridsize_y );
dim3 dimBlock ( SR_BW, SR_BH );
// initialise xi1_g and xi2_g to zero
setKernel <<<dimGrid, dimBlock>>>( xi1_g, nx, ny, pitchf1, 0.0f );
setKernel <<<dimGrid, dimBlock>>>( xi2_g, nx, ny, pitchf1, 0.0f );
// initialise u_g and uor_g to 64.0f (final output and overrelaxed superresolution image)
setKernel <<<dimGrid, dimBlock>>>( u_g, nx, ny, pitchf1, 64.0f );
setKernel <<<dimGrid, dimBlock>>>( uor_g, nx, ny, pitchf1, 64.0f );
// initialise all elements of q_g to zero (difference images)
for(unsigned int k = 0; k < q_g.size(); ++k )
{
setKernel <<<dimGrid, dimBlock>>>( q_g[k], nx_orig, ny_orig, pitchf1_orig, 0.0f );
}
float factorquad = factor_rescale_x * factor_rescale_x * factor_rescale_y * factor_rescale_y;
float factor_degrade_update = pow( factorquad, CLIPPING_TRADEOFF_DEGRADE_1N );
float factor_degrade_clipping = factorquad / factor_degrade_update;
float huber_denom_degrade = 1.0f + huber_epsilon * tau_d / factor_degrade_clipping;
float factor_tv_update = pow( factor_tv, CLIPPING_TRADEOFF_TV );
float factor_tv_clipping = factor_tv / factor_tv_update;
float huber_denom_tv = 1.0f + huber_epsilon * tau_d / factor_tv;
// outer iterations for convergence
for( int i = 0; i < oi; ++i )
{
// calculate dual tv
#if SHARED_MEM
dualTVHuber_sm<<<dimGrid,dimBlock>>>
( uor_g, xi1_g, xi2_g, nx, ny, pitchf1, factor_tv_update, factor_tv_clipping, huber_denom_tv, tau_d );
#else
dualTVHuber_gm<<<dimGrid,dimBlock>>>
( uor_g, xi1_g, xi2_g, nx, ny, pitchf1, factor_tv_update, factor_tv_clipping, huber_denom_tv, tau_d );
#endif
// DUAL DATA
// iterating over all images
std::vector<float*>::iterator image = images_g.begin();
std::list<FlowGPU>::iterator flow = flowsGPU.begin();
for( unsigned int k = 0; image != images_g.end() && flow != flowsGPU.end() && k < q_g.size(); ++k, ++flow, ++image )
{
// backward warping of upsampled input image using flow
#if BACKWARDSWARPING_VALUE_TEXTURE_MEM
backwardRegistrationBilinearValueTex ( uor_g, flow->u_g, flow->v_g, temp1_g, 0.0f, nx, ny, pitchf1, pitchf1, 1.0f, 1.0f );
#else
backwardRegistrationBilinearValueTex_gm ( uor_g, flow->u_g, flow->v_g, temp1_g, 0.0f, nx, ny, pitchf1, pitchf1, 1.0f, 1.0f );
#endif
if( blur > 0.0f )
{
// blur warped input image
#if GAUSS_MEMORY == 2
// gauss with texture memory
gaussBlurSeparateMirrorGpu ( temp1_g, temp2_g, nx, ny, pitchf1, blur, blur, (int)(3.0f * blur), temp4_g, 0 );
#elif GAUSS_MEMORY == 1
// gauss with shared memory
gaussBlurSeparateMirrorGpu_sm ( temp1_g, temp2_g, nx, ny, pitchf1, blur, blur, (int)(3.0f * blur), temp4_g, 0 );
#else
// gauss with global memory
gaussBlurSeparateMirrorGpu_gm ( temp1_g, temp2_g, nx, ny, pitchf1, blur, blur, (int)(3.0f * blur), temp4_g, 0 );
#endif
}
else
{
// swap the helper array pointers, if not blurred
float *temp = temp1_g;
temp1_g = temp2_g;
temp2_g = temp;
}
if( factor_rescale_x > 1.0f || factor_rescale_y > 1.0f )
{
// downsampling of blurred and warped image
resampleAreaParallelSeparate(
temp2_g, // input image
temp1_g, // output image
nx, ny, // input size
pitchf1, // input pitch
nx_orig, ny_orig, // output size
pitchf1_orig, // output pitch
temp4_g // helper array
);
}
else
{
// swap the helper array pointers
float *temp = temp1_g;
temp1_g = temp2_g;
temp2_g = temp;
}
// compute difference between warped and downsampled input image
// and current small reference image (the one we want to compute the superresolution of)
dualL1Difference<<<dimGrid, dimBlock>>>
( temp1_g, *image, q_g[k], nx_orig, ny_orig, pitchf1_orig,
factor_degrade_update, factor_degrade_clipping, huber_denom_degrade, tau_d);
}
// reset 3rd helper array to zero
setKernel <<<dimGrid, dimBlock>>>( temp3_g, nx, ny, pitchf1, 0.0f );
// iterating over all images
image = images_g.begin();
flow = flowsGPU.begin();
for( unsigned int k = 0; image != images_g.end() && flow != flowsGPU.end() && k < q_g.size(); ++k, ++flow, ++image )
{
if( factor_rescale_x > 1.0f || factor_rescale_y > 1.0f )
{
// upsample difference images
resampleAreaParallelSeparateAdjoined( q_g[k], temp1_g, nx_orig, ny_orig, pitchf1_orig, nx, ny, pitchf1, temp4_g );
}
else
{
// copy q_g[k] to temp1_g
hipMemcpy( temp1_g, q_g[k], ny * pitchf1 * sizeof(float), hipMemcpyDeviceToDevice );
}
if( blur > 0.0f )
{
// blur upsampled difference image
#if GAUSS_MEMORY == 2
// gauss with texture memory
gaussBlurSeparateMirrorGpu ( temp1_g, temp2_g, nx, ny, pitchf1, blur, blur, (int)(3.0f * blur), temp4_g, 0 );
#elif GAUSS_MEMORY == 1
// gauss with shared memory
gaussBlurSeparateMirrorGpu_sm ( temp1_g, temp2_g, nx, ny, pitchf1, blur, blur, (int)(3.0f * blur), temp4_g, 0 );
#else
// gauss with global memory
gaussBlurSeparateMirrorGpu_gm ( temp1_g, temp2_g, nx, ny, pitchf1, blur, blur, (int)(3.0f * blur), temp4_g, 0 );
#endif
}
else
{
// swap the helper array pointers
float *temp = temp1_g;
temp1_g = temp2_g;
temp2_g = temp;
}
// forward warping of the difference image
forewardRegistrationBilinearAtomic (
flow->u_g, flow->v_g,
temp2_g, temp1_g,
nx, ny,
pitchf1
);
// sum all difference images up
addKernel<<<dimGrid, dimBlock>>>( temp1_g, temp3_g, nx, ny, pitchf1 );
}
#if SHARED_MEM
primal1N_sm<<< dimGrid, dimBlock>>>(xi1_g, xi2_g, temp3_g, u_g, uor_g,
nx, ny, pitchf1, factor_tv_update, factor_degrade_update, tau_p, overrelaxation);
#else
primal1N_gm<<< dimGrid, dimBlock>>>(xi1_g, xi2_g, temp3_g, u_g, uor_g,
nx, ny, pitchf1, factor_tv_update, factor_degrade_update, tau_p, overrelaxation);
#endif
}
}
|
757f7d7abf7640841cd16953fdfd14348c814b97.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Udacity Homework 3
HDR Tone-mapping
Background HDR
==============
A High Dynamic Range (HDR) image contains a wider variation of intensity
and color than is allowed by the RGB format with 1 byte per channel that we
have used in the previous assignment.
To store this extra information we use single precision floating point for
each channel. This allows for an extremely wide range of intensity values.
In the image for this assignment, the inside of a church with light coming in
through stained glass windows, the raw input floating point values for the
channels range from 0 to 275. But the mean is .41 and 98% of the values are
less than 3! This means that certain areas (the windows) are extremely bright
compared to everywhere else. If we linearly map this [0-275] range into the
[0-255] range that we have been using then most values will be mapped to zero!
The only thing we will be able to see are the very brightest areas - the
windows - everything else will appear pitch black.
The problem is that although we have cameras capable of recording the wide
range of intensity that exists in the real world our monitors are not capable
of displaying them. Our eyes are also quite capable of observing a much wider
range of intensities than our image formats / monitors are capable of
displaying.
Tone-mapping is a process that transforms the intensities in the image so that
the brightest values aren't nearly so far away from the mean. That way when
we transform the values into [0-255] we can actually see the entire image.
There are many ways to perform this process and it is as much an art as a
science - there is no single "right" answer. In this homework we will
implement one possible technique.
Background Chrominance-Luminance
================================
The RGB space that we have been using to represent images can be thought of as
one possible set of axes spanning a three dimensional space of color. We
sometimes choose other axes to represent this space because they make certain
operations more convenient.
Another possible way of representing a color image is to separate the color
information (chromaticity) from the brightness information. There are
multiple different methods for doing this - a common one during the analog
television days was known as Chrominance-Luminance or YUV.
We choose to represent the image in this way so that we can remap only the
intensity channel and then recombine the new intensity values with the color
information to form the final image.
Old TV signals used to be transmitted in this way so that black & white
televisions could display the luminance channel while color televisions would
display all three of the channels.
Tone-mapping
============
In this assignment we are going to transform the luminance channel (actually
the log of the luminance, but this is unimportant for the parts of the
algorithm that you will be implementing) by compressing its range to [0, 1].
To do this we need the cumulative distribution of the luminance values.
Example
-------
input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2]
min / max / range: 0 / 9 / 9
histo with 3 bins: [4 7 3]
cdf : [4 11 14]
Your task is to calculate this cumulative distribution by following these
steps.
*/
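/* ------------------------------------------------------------------------------------------------
   Reference sketch (added for clarity; not part of the original assignment). The hypothetical
   host helper below reproduces the worked example above in plain C++: min / max, the 3-bin
   histogram [4 7 3], and an exclusive prefix sum over it. Note that the kernels further down
   build the exclusive form of the cdf, while the example lists the running (inclusive) totals.
   None of these names are used anywhere else in this file.
------------------------------------------------------------------------------------------------ */
#include <algorithm>
#include <vector>
static inline void referenceHistogramExample()
{
  const float lum[] = { 2, 4, 3, 3, 1, 7, 4, 5, 7, 0, 9, 4, 3, 2 };
  const int n = sizeof(lum) / sizeof(lum[0]);
  const int numBins = 3;
  const float lumMin = *std::min_element(lum, lum + n);   // 0
  const float lumMax = *std::max_element(lum, lum + n);   // 9
  const float lumRange = lumMax - lumMin;                 // 9
  std::vector<unsigned int> histo(numBins, 0);            // becomes [4 7 3]
  for (int i = 0; i < n; ++i)
  {
    int bin = (int)((lum[i] - lumMin) / lumRange * numBins);
    if (bin == numBins) bin = numBins - 1;                // clamp the maximum value into the last bin
    ++histo[bin];
  }
  std::vector<unsigned int> cdf(numBins, 0);              // exclusive scan: [0 4 11]
  for (int b = 1; b < numBins; ++b)
    cdf[b] = cdf[b - 1] + histo[b - 1];
  (void)cdf;
}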
#include "utils.h"
#include <stdio.h>
#include <float.h>
__global__ void findMinMaxKernel(const float * const logLuminance, float * const minLogLum, const int op, const int numRows, const int numCols){
int totalId = blockIdx.x * blockDim.x + threadIdx.x;
int tid = threadIdx.x;
extern __shared__ float sharedLogLuminance[];
// pad out-of-range threads with a neutral value instead of returning early, so that every
// thread reaches the __syncthreads() below and the shared-memory tile is fully initialised
//if(totalId == 1000) printf("%0.2f\n", logLuminance[totalId]);
sharedLogLuminance[tid] = (totalId < numRows * numCols) ? logLuminance[totalId]
: (op == 0 ? FLT_MAX : -FLT_MAX);
__syncthreads();
// do reduction
for(unsigned int s = blockDim.x / 2; s >= 1; s >>= 1){
if(tid < s){
if(op == 0)
sharedLogLuminance[tid] = min(sharedLogLuminance[tid],sharedLogLuminance[tid + s]);
else
sharedLogLuminance[tid] = max(sharedLogLuminance[tid],sharedLogLuminance[tid + s]);
}
__syncthreads();
}
//logLuminance[totalId] = sharedLogLuminance[totalId];
if(tid == 0){
//logLuminance[blockIdx.x] = sharedLogLuminance[0];
minLogLum[blockIdx.x] = sharedLogLuminance[0];
}
}
void findMinMax(const float * const d_logLuminance, float &min_logLum, float &max_logLum, const size_t numRows, const size_t numCols){
const int threadsPerBlock = 256;
const int numBlocks = numRows * numCols / threadsPerBlock + 1;
float h_minLogLum[numBlocks];
float h_maxLogLum[numBlocks];
for(int i = 0; i < numBlocks; i++){
h_minLogLum[i] = FLT_MAX;
h_maxLogLum[i] = -FLT_MAX; // FLT_MIN is the smallest positive float, not the most negative one
}
float * d_minLogLum;
float * d_maxLogLum;
checkCudaErrors(hipMalloc((void **) &d_minLogLum, sizeof(float) * numBlocks));
checkCudaErrors(hipMalloc((void **) &d_maxLogLum, sizeof(float) * numBlocks));
//printf("Number of blocks: %d\n", numBlocks);
hipLaunchKernelGGL(( findMinMaxKernel), dim3(numBlocks),dim3(threadsPerBlock), sizeof(float) * threadsPerBlock, 0, d_logLuminance, d_minLogLum, 0, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
checkCudaErrors(hipMemcpy(h_minLogLum,d_minLogLum, sizeof(float) * numBlocks, hipMemcpyDeviceToHost));
min_logLum = FLT_MAX;
for(int i = 0; i < numBlocks; i++){
//printf("%0.2f\n", h_minLogLum[i]);
if(h_minLogLum[i] < min_logLum)
min_logLum = h_minLogLum[i];
}
hipLaunchKernelGGL(( findMinMaxKernel), dim3(numBlocks),dim3(threadsPerBlock), sizeof(float) * threadsPerBlock , 0, d_logLuminance, d_maxLogLum, 1, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
checkCudaErrors(hipMemcpy(h_maxLogLum,d_maxLogLum, sizeof(float) * numBlocks, hipMemcpyDeviceToHost));
max_logLum = -FLT_MAX;
for(int i = 0; i < numBlocks; i++){
if(h_maxLogLum[i] > max_logLum)
max_logLum = h_maxLogLum[i];
}
hipFree(d_minLogLum);
hipFree(d_maxLogLum);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
__global__ void computeHistogramKernel(const float* const d_logLuminance,
unsigned int * const d_histogram,
const float lumMin,
float lumRange,
size_t numBins,
size_t length){
size_t totalId = blockIdx.x * blockDim.x + threadIdx.x;
if(totalId >= length)
return;
int bin = (int) ((d_logLuminance[totalId] - lumMin) / lumRange * numBins);
//if(bin < 10) printf("%d ", bin);
if(bin == numBins) bin -= 1;
if(bin >= numBins || bin < 0) { printf("Numbins: %lu ", (unsigned long)numBins); printf("Problem! Bin: %d\n", bin); }
atomicAdd(d_histogram + bin, 1);
}
__global__ void computeCdfKernel(const unsigned int * const d_histogram,
unsigned int * const d_cdf,
unsigned int * const d_temp,
unsigned int * const d_sumresults,
size_t length){
size_t totalId = blockIdx.x * blockDim.x + threadIdx.x;
size_t tId = threadIdx.x;
if(totalId >= length)
return;
if(totalId > 0)
d_temp[totalId] = d_histogram[totalId - 1];
else
d_temp[totalId] = 0;
__syncthreads();
for(int s = 1; s < length; s *= 2){
//if(totalId == 1023) { printf("s: %d, ", s); printf("d_temp: %u ", d_temp[totalId]); printf("d_temp - s: %u\n", d_temp[totalId-s]);}
//if(totalId == 510) { printf("780 s: %d, ", s); printf("d_temp: %u ", d_temp[totalId]); printf("d_temp - s: %u\n", d_temp[totalId-s]);}
if(tId >= s){ // we don't want totalId - s to go out of bounds
d_cdf[totalId] = d_temp[totalId] + d_temp[totalId - s];
} else {
d_cdf[totalId] = d_temp[totalId];
}
__syncthreads();
d_temp[totalId] = d_cdf[totalId];
__syncthreads();
}
if(threadIdx.x == blockDim.x - 1){
d_sumresults[blockIdx.x] = d_cdf[totalId] + d_histogram[totalId];
}
}
__global__ void sumCdfKernel(unsigned int * const d_cdf, unsigned int * const d_sumresults, size_t length){
size_t totalId = blockIdx.x * blockDim.x + threadIdx.x;
if(totalId >= length)
return;
for(int s = 1; s < gridDim.x; s++){
if(blockIdx.x >= s){ // don't want blockIdx.x - s to be out of bounds
d_cdf[totalId] += d_sumresults[blockIdx.x - s];
}
}
}
void your_histogram_and_prefixsum(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
//TODO
/*Here are the steps you need to implement
1) find the minimum and maximum value in the input logLuminance channel
store in min_logLum and max_logLum
2) subtract them to find the range
3) generate a histogram of all the values in the logLuminance channel using
the formula: bin = (lum[i] - lumMin) / lumRange * numBins
4) Perform an exclusive scan (prefix sum) on the histogram to get
the cumulative distribution of luminance values (this should go in the
incoming d_cdf pointer which already has been allocated for you) */
findMinMax(d_logLuminance, min_logLum, max_logLum, numRows, numCols);
printf("Min value: %0.2f, max value: %0.2f\n", min_logLum, max_logLum);
float range = max_logLum - min_logLum;
// HISTOGRAM
int threadsPerBlock = 512;
int numBlocks = numRows * numCols / threadsPerBlock + 1;
unsigned int h_histogram[numBins];
memset(h_histogram, 0, sizeof(unsigned int) * numBins);
unsigned int * d_histogram;
checkCudaErrors(hipMalloc((void **) &d_histogram, sizeof(unsigned int) * numBins));
checkCudaErrors(hipMemset(d_histogram, 0, sizeof(unsigned int) * numBins));
hipLaunchKernelGGL(( computeHistogramKernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_logLuminance, d_histogram, min_logLum, range, numBins, numRows * numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
checkCudaErrors(hipMemcpy(h_histogram, d_histogram, sizeof(unsigned int) * numBins, hipMemcpyDeviceToHost));
// SCAN
threadsPerBlock = 512;
numBlocks = ceil(float(numBins) / float(threadsPerBlock));
unsigned int * d_temp;
checkCudaErrors(hipMalloc((void **) &d_temp, sizeof(unsigned int) * numBins));
checkCudaErrors(hipMemset(d_temp, 0, sizeof(unsigned int) * numBins));
unsigned int * d_sumresults;
checkCudaErrors(hipMalloc((void **) &d_sumresults, sizeof(unsigned int) * numBlocks));
checkCudaErrors(hipMemset(d_sumresults, 0, sizeof(unsigned int) * numBlocks));
hipLaunchKernelGGL(( computeCdfKernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_histogram, d_cdf, d_temp, d_sumresults, numBins);
hipLaunchKernelGGL(( sumCdfKernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_cdf, d_sumresults, numBins);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
unsigned int h_cdf[numBins];
checkCudaErrors(hipMemcpy(h_cdf, d_cdf, sizeof(unsigned int) * numBins, hipMemcpyDeviceToHost));
/*
printf("\n\n");
for(int i = 0; i < numBins; i++){
printf("%u ", h_histogram[i]);
}
printf("\n\n\n");
for(int i = 0; i < numBins; i++){
printf("%u ", h_cdf[i]);
}
printf("\nnumber of bins: %lu\n", numBins);
*/
hipFree(d_histogram);
hipFree(d_temp);
}
| 757f7d7abf7640841cd16953fdfd14348c814b97.cu | /* Udacity Homework 3
HDR Tone-mapping
Background HDR
==============
A High Dynamic Range (HDR) image contains a wider variation of intensity
and color than is allowed by the RGB format with 1 byte per channel that we
have used in the previous assignment.
To store this extra information we use single precision floating point for
each channel. This allows for an extremely wide range of intensity values.
In the image for this assignment, the inside of a church with light coming in
through stained glass windows, the raw input floating point values for the
channels range from 0 to 275. But the mean is .41 and 98% of the values are
less than 3! This means that certain areas (the windows) are extremely bright
compared to everywhere else. If we linearly map this [0-275] range into the
[0-255] range that we have been using then most values will be mapped to zero!
The only thing we will be able to see are the very brightest areas - the
windows - everything else will appear pitch black.
The problem is that although we have cameras capable of recording the wide
range of intensity that exists in the real world our monitors are not capable
of displaying them. Our eyes are also quite capable of observing a much wider
range of intensities than our image formats / monitors are capable of
displaying.
Tone-mapping is a process that transforms the intensities in the image so that
the brightest values aren't nearly so far away from the mean. That way when
we transform the values into [0-255] we can actually see the entire image.
There are many ways to perform this process and it is as much an art as a
science - there is no single "right" answer. In this homework we will
implement one possible technique.
Background Chrominance-Luminance
================================
The RGB space that we have been using to represent images can be thought of as
one possible set of axes spanning a three dimensional space of color. We
sometimes choose other axes to represent this space because they make certain
operations more convenient.
Another possible way of representing a color image is to separate the color
information (chromaticity) from the brightness information. There are
multiple different methods for doing this - a common one during the analog
television days was known as Chrominance-Luminance or YUV.
We choose to represent the image in this way so that we can remap only the
intensity channel and then recombine the new intensity values with the color
information to form the final image.
Old TV signals used to be transmitted in this way so that black & white
televisions could display the luminance channel while color televisions would
display all three of the channels.
Tone-mapping
============
In this assignment we are going to transform the luminance channel (actually
the log of the luminance, but this is unimportant for the parts of the
algorithm that you will be implementing) by compressing its range to [0, 1].
To do this we need the cumulative distribution of the luminance values.
Example
-------
input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2]
min / max / range: 0 / 9 / 9
histo with 3 bins: [4 7 3]
cdf : [4 11 14]
Your task is to calculate this cumulative distribution by following these
steps.
*/
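/* ------------------------------------------------------------------------------------------------
   Reference sketch (added for clarity; not part of the original assignment). The device scan in
   this file is done in two passes: an exclusive scan inside each block plus a per-block total
   (computeCdfKernel), and a second pass that adds the totals of all preceding blocks
   (sumCdfKernel). The hypothetical host helper below mirrors that strategy so the GPU result can
   be checked against it; none of these names are used anywhere else in this file.
------------------------------------------------------------------------------------------------ */
#include <algorithm>
#include <vector>
static std::vector<unsigned int> referenceTwoPassExclusiveScan(const std::vector<unsigned int> &histo,
                                                               size_t blockSize)
{
  std::vector<unsigned int> cdf(histo.size(), 0);
  std::vector<unsigned int> blockSums((histo.size() + blockSize - 1) / blockSize, 0);
  // pass 1: exclusive scan within each block, remembering the block's total
  for (size_t b = 0; b * blockSize < histo.size(); ++b)
  {
    unsigned int running = 0;
    const size_t end = std::min(histo.size(), (b + 1) * blockSize);
    for (size_t i = b * blockSize; i < end; ++i)
    {
      cdf[i] = running;
      running += histo[i];
    }
    blockSums[b] = running;
  }
  // pass 2: add the totals of all preceding blocks to every element of the current block
  unsigned int offset = 0;
  for (size_t b = 0; b * blockSize < histo.size(); ++b)
  {
    const size_t end = std::min(histo.size(), (b + 1) * blockSize);
    for (size_t i = b * blockSize; i < end; ++i)
      cdf[i] += offset;
    offset += blockSums[b];
  }
  return cdf;
}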
#include "utils.h"
#include <stdio.h>
#include <float.h>
__global__ void findMinMaxKernel(const float * const logLuminance, float * const minLogLum, const int op, const int numRows, const int numCols){
int totalId = blockIdx.x * blockDim.x + threadIdx.x;
int tid = threadIdx.x;
extern __shared__ float sharedLogLuminance[];
// pad out-of-range threads with a neutral value instead of returning early, so that every
// thread reaches the __syncthreads() below and the shared-memory tile is fully initialised
//if(totalId == 1000) printf("%0.2f\n", logLuminance[totalId]);
sharedLogLuminance[tid] = (totalId < numRows * numCols) ? logLuminance[totalId]
: (op == 0 ? FLT_MAX : -FLT_MAX);
__syncthreads();
// do reduction
for(unsigned int s = blockDim.x / 2; s >= 1; s >>= 1){
if(tid < s){
if(op == 0)
sharedLogLuminance[tid] = min(sharedLogLuminance[tid],sharedLogLuminance[tid + s]);
else
sharedLogLuminance[tid] = max(sharedLogLuminance[tid],sharedLogLuminance[tid + s]);
}
__syncthreads();
}
//logLuminance[totalId] = sharedLogLuminance[totalId];
if(tid == 0){
//logLuminance[blockIdx.x] = sharedLogLuminance[0];
minLogLum[blockIdx.x] = sharedLogLuminance[0];
}
}
void findMinMax(const float * const d_logLuminance, float &min_logLum, float &max_logLum, const size_t numRows, const size_t numCols){
const int threadsPerBlock = 256;
const int numBlocks = numRows * numCols / threadsPerBlock + 1;
float h_minLogLum[numBlocks];
float h_maxLogLum[numBlocks];
for(int i = 0; i < numBlocks; i++){
h_minLogLum[i] = FLT_MAX;
h_maxLogLum[i] = -FLT_MAX; // FLT_MIN is the smallest positive float, not the most negative one
}
float * d_minLogLum;
float * d_maxLogLum;
checkCudaErrors(cudaMalloc((void **) &d_minLogLum, sizeof(float) * numBlocks));
checkCudaErrors(cudaMalloc((void **) &d_maxLogLum, sizeof(float) * numBlocks));
//printf("Number of blocks: %d\n", numBlocks);
findMinMaxKernel<<<numBlocks,threadsPerBlock, sizeof(float) * threadsPerBlock>>>(d_logLuminance, d_minLogLum, 0, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaMemcpy(h_minLogLum,d_minLogLum, sizeof(float) * numBlocks, cudaMemcpyDeviceToHost));
min_logLum = FLT_MAX;
for(int i = 0; i < numBlocks; i++){
//printf("%0.2f\n", h_minLogLum[i]);
if(h_minLogLum[i] < min_logLum)
min_logLum = h_minLogLum[i];
}
findMinMaxKernel<<<numBlocks,threadsPerBlock, sizeof(float) * threadsPerBlock >>>(d_logLuminance, d_maxLogLum, 1, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaMemcpy(h_maxLogLum,d_maxLogLum, sizeof(float) * numBlocks, cudaMemcpyDeviceToHost));
max_logLum = -FLT_MAX;
for(int i = 0; i < numBlocks; i++){
if(h_maxLogLum[i] > max_logLum)
max_logLum = h_maxLogLum[i];
}
cudaFree(d_minLogLum);
cudaFree(d_maxLogLum);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
__global__ void computeHistogramKernel(const float* const d_logLuminance,
unsigned int * const d_histogram,
const float lumMin,
float lumRange,
size_t numBins,
size_t length){
size_t totalId = blockIdx.x * blockDim.x + threadIdx.x;
if(totalId >= length)
return;
int bin = (int) ((d_logLuminance[totalId] - lumMin) / lumRange * numBins);
//if(bin < 10) printf("%d ", bin);
if(bin == numBins) bin -= 1;
if(bin >= numBins || bin < 0) { printf("Numbins: %lu ", (unsigned long)numBins); printf("Problem! Bin: %d\n", bin); }
atomicAdd(d_histogram + bin, 1);
}
__global__ void computeCdfKernel(const unsigned int * const d_histogram,
unsigned int * const d_cdf,
unsigned int * const d_temp,
unsigned int * const d_sumresults,
size_t length){
size_t totalId = blockIdx.x * blockDim.x + threadIdx.x;
size_t tId = threadIdx.x;
if(totalId >= length)
return;
if(totalId > 0)
d_temp[totalId] = d_histogram[totalId - 1];
else
d_temp[totalId] = 0;
__syncthreads();
for(int s = 1; s < length; s *= 2){
//if(totalId == 1023) { printf("s: %d, ", s); printf("d_temp: %u ", d_temp[totalId]); printf("d_temp - s: %u\n", d_temp[totalId-s]);}
//if(totalId == 510) { printf("780 s: %d, ", s); printf("d_temp: %u ", d_temp[totalId]); printf("d_temp - s: %u\n", d_temp[totalId-s]);}
if(tId >= s){ // we don't want totalId - s to go out of bounds
d_cdf[totalId] = d_temp[totalId] + d_temp[totalId - s];
} else {
d_cdf[totalId] = d_temp[totalId];
}
__syncthreads();
d_temp[totalId] = d_cdf[totalId];
__syncthreads();
}
if(threadIdx.x == blockDim.x - 1){
d_sumresults[blockIdx.x] = d_cdf[totalId] + d_histogram[totalId];
}
}
__global__ void sumCdfKernel(unsigned int * const d_cdf, unsigned int * const d_sumresults, size_t length){
size_t totalId = blockIdx.x * blockDim.x + threadIdx.x;
if(totalId >= length)
return;
for(int s = 1; s < gridDim.x; s++){
if(blockIdx.x >= s){ // don't want blockIdx.x - s to be out of bounds
d_cdf[totalId] += d_sumresults[blockIdx.x - s];
}
}
}
void your_histogram_and_prefixsum(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
//TODO
/*Here are the steps you need to implement
1) find the minimum and maximum value in the input logLuminance channel
store in min_logLum and max_logLum
2) subtract them to find the range
3) generate a histogram of all the values in the logLuminance channel using
the formula: bin = (lum[i] - lumMin) / lumRange * numBins
4) Perform an exclusive scan (prefix sum) on the histogram to get
the cumulative distribution of luminance values (this should go in the
incoming d_cdf pointer which already has been allocated for you) */
findMinMax(d_logLuminance, min_logLum, max_logLum, numRows, numCols);
printf("Min value: %0.2f, max value: %0.2f\n", min_logLum, max_logLum);
float range = max_logLum - min_logLum;
// HISTOGRAM
int threadsPerBlock = 512;
int numBlocks = numRows * numCols / threadsPerBlock + 1;
unsigned int h_histogram[numBins];
memset(h_histogram, 0, sizeof(unsigned int) * numBins);
unsigned int * d_histogram;
checkCudaErrors(cudaMalloc((void **) &d_histogram, sizeof(unsigned int) * numBins));
checkCudaErrors(cudaMemset(d_histogram, 0, sizeof(unsigned int) * numBins));
computeHistogramKernel<<<numBlocks, threadsPerBlock>>>(d_logLuminance, d_histogram, min_logLum, range, numBins, numRows * numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaMemcpy(h_histogram, d_histogram, sizeof(unsigned int) * numBins, cudaMemcpyDeviceToHost));
// SCAN
threadsPerBlock = 512;
numBlocks = ceil(float(numBins) / float(threadsPerBlock));
unsigned int * d_temp;
checkCudaErrors(cudaMalloc((void **) &d_temp, sizeof(unsigned int) * numBins));
checkCudaErrors(cudaMemset(d_temp, 0, sizeof(unsigned int) * numBins));
unsigned int * d_sumresults;
checkCudaErrors(cudaMalloc((void **) &d_sumresults, sizeof(unsigned int) * numBlocks));
checkCudaErrors(cudaMemset(d_sumresults, 0, sizeof(unsigned int) * numBlocks));
computeCdfKernel<<<numBlocks, threadsPerBlock>>>(d_histogram, d_cdf, d_temp, d_sumresults, numBins);
sumCdfKernel<<<numBlocks, threadsPerBlock>>>(d_cdf, d_sumresults, numBins);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
unsigned int h_cdf[numBins];
checkCudaErrors(cudaMemcpy(h_cdf, d_cdf, sizeof(unsigned int) * numBins, cudaMemcpyDeviceToHost));
/*
printf("\n\n");
for(int i = 0; i < numBins; i++){
printf("%u ", h_histogram[i]);
}
printf("\n\n\n");
for(int i = 0; i < numBins; i++){
printf("%u ", h_cdf[i]);
}
printf("\nnumber of bins: %lu\n", numBins);
*/
cudaFree(d_histogram);
cudaFree(d_temp);
}
|
7f35b0479fa4f0f88ddd90cae34b053504c1e44d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "histo_kernel.hip"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned char *buffer = NULL;
hipMalloc(&buffer, XSIZE*YSIZE);
unsigned int *histo = NULL;
hipMalloc(&histo, XSIZE*YSIZE);
long size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(histo_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, buffer, histo, size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(histo_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, buffer, histo, size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(histo_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, buffer, histo, size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 7f35b0479fa4f0f88ddd90cae34b053504c1e44d.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "histo_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned char *buffer = NULL;
cudaMalloc(&buffer, XSIZE*YSIZE);
unsigned int *histo = NULL;
cudaMalloc(&histo, XSIZE*YSIZE);
long size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
histo_kernel<<<gridBlock,threadBlock>>>(buffer,histo,size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
histo_kernel<<<gridBlock,threadBlock>>>(buffer,histo,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
histo_kernel<<<gridBlock,threadBlock>>>(buffer,histo,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
897015aeedbfe568c5a7f9643de705d19010e59d.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <string.h>
#include <hip/hip_runtime.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "math.h"
#include "time.h"
#include <fstream>
#include <iomanip>
#include <sstream>
#include <cstdlib>
#include <cstdio>
#include <float.h>
#include <hiprand/hiprand_kernel.h>
//Defined in order to test different block and grid dimensions
// THREAD_COUNT and GRID_DIM must be power of 2
#define THREAD_COUNT 1024
#define GRID_DIM 32
using namespace std;
float* LoadPGM(char * sFileName, int & nWidth, int & nHeight, int & nMaxGray);
void WritePGM(char * sFileName, float * pDst_Host, int nWidth, int nHeight, int nMaxGray);
/*
* Three kernels are called in the following order:
* findMinMax
* finalMinMax
* calcPixelVal
*
* findMinMax finds mins&maxes for each block
* finalMinMax finds 1 min and 1 max from previous output
* calcPixelVal calculates new pixels value according to min&max
*/
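/* ------------------------------------------------------------------------------------------------
   Reference sketch (added for clarity). The kernels named above, together with LENA_SIZE, are
   assumed to be defined in a separate header that is not shown in this file. The per-pixel work
   of calcPixelVal is an ordinary linear contrast stretch; the hypothetical host helper below
   spells it out so the GPU output can be compared against a CPU result.
------------------------------------------------------------------------------------------------ */
static void referenceContrastStretch(float *img, int n, float minVal, float maxVal)
{
  // constant = 255 / (max - min); every pixel becomes (pixel - min) * constant
  const float c = 255.0f / (maxVal - minVal);
  for (int i = 0; i < n; ++i)
    img[i] = (img[i] - minVal) * c;
}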
int main(void)
{
float *Lena_Original_h, *Lena_d, *Res_MinsMaxs; // Pointer to host & device arrays
float *min, *max, *constant; //For keeping min, max and a constant value which holds (255/(max-min))
size_t vector_size; //total size of lena
vector_size = LENA_SIZE * sizeof(float);
int nWidth, nHeight, nMaxGray;
Lena_Original_h= LoadPGM("lena_before.pgm", nWidth, nHeight, nMaxGray);
//allocate space for variables on device
hipMalloc((void **)&min, sizeof(float));
hipMalloc((void **)&max, sizeof(float));
hipMalloc((void **)&constant, sizeof(float));
hipMalloc((void **)&Lena_d, vector_size); // Allocate array on device for Lena
hipMemcpy(Lena_d, Lena_Original_h, vector_size, hipMemcpyHostToDevice); // copy values to device
//Block dimension is directly from THREAD_COUNT
dim3 Block_dim(THREAD_COUNT, 1, 1);
//Grid dim will start from GRID_DIM
//Not used dependent values in order to test different dimensions
dim3 Grid_dim(GRID_DIM, 1, 1);
//For shared memory size. x2 for keeping minimums and maximums in one array
int smemSize = sizeof(float) * THREAD_COUNT * 2;
//For keeping minimums and maximums found in each block
// x2 for for keeping minimums and maximums in one array
hipMalloc((void **)&Res_MinsMaxs, sizeof(float)*Grid_dim.x * 2);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
//kernel calls for findings mins&maxes for each block
switch (THREAD_COUNT)
{
case 1024:
findMinMax<1024> << < Grid_dim, Block_dim, smemSize >> > (Lena_d, Res_MinsMaxs); break;
case 512:
findMinMax<512 > << < Grid_dim, Block_dim, smemSize >> > (Lena_d, Res_MinsMaxs); break;
case 256:
findMinMax<256 > << < Grid_dim, Block_dim, smemSize >> > (Lena_d, Res_MinsMaxs); break;
case 128:
findMinMax<128 > << < Grid_dim, Block_dim, smemSize >> > (Lena_d, Res_MinsMaxs); break;
case 64:
findMinMax<64 > << < Grid_dim, Block_dim, smemSize >> > (Lena_d, Res_MinsMaxs); break;
case 32:
findMinMax<32 > << < Grid_dim, Block_dim, smemSize >> > (Lena_d, Res_MinsMaxs); break;
case 16:
findMinMax<16 > << < Grid_dim, Block_dim, smemSize >> > (Lena_d, Res_MinsMaxs); break;
case 8:
findMinMax<8 > << < Grid_dim, Block_dim, smemSize >> > (Lena_d, Res_MinsMaxs); break;
case 4:
findMinMax<4 > << < Grid_dim, Block_dim, smemSize >> > (Lena_d, Res_MinsMaxs); break;
case 2:
findMinMax<2 > << < Grid_dim, Block_dim, smemSize >> > (Lena_d, Res_MinsMaxs); break;
case 1:
findMinMax<1 > <<< Grid_dim, Block_dim, smemSize >> > (Lena_d, Res_MinsMaxs); break;
}
//From the kernels above, Grid_dim*2 min and max values will be produced.
//each block produces a min&max value
//while taking values from global memory, each thread takes multiple elements
//so shared memory size for all max&min values => sizeof(float)*(Grid_dim*2)/2
smemSize = sizeof(float) * Grid_dim.x;
//because each thread takes two elements, block_dim will be half of Grid_dim
Block_dim = dim3((Grid_dim.x) / 2, 1, 1);
//only 1 block in order two get just 1 min&max
//Also previous grid_dim will not be more than 1024, so there will not be too many elements
Grid_dim = dim3(1, 1, 1);
//kernel calls for finding final min&max values
switch (Block_dim.x)
{
case 1024:
finalMinMax<1024> << < Grid_dim, Block_dim, smemSize >> > (Res_MinsMaxs, constant, min, max); break;
case 512:
finalMinMax<512 > << < Grid_dim, Block_dim, smemSize >> > (Res_MinsMaxs, constant, min, max); break;
case 256:
finalMinMax<256 > << < Grid_dim, Block_dim, smemSize >> > (Res_MinsMaxs, constant, min, max); break;
case 128:
finalMinMax<128 > << < Grid_dim, Block_dim, smemSize >> > (Res_MinsMaxs, constant, min, max); break;
case 64:
finalMinMax<64 > << < Grid_dim, Block_dim, smemSize >> > (Res_MinsMaxs, constant, min, max); break;
case 32:
finalMinMax<32 > << < Grid_dim, Block_dim, smemSize >> > (Res_MinsMaxs, constant, min, max); break;
case 16:
finalMinMax<16 > << < Grid_dim, Block_dim, smemSize >> > (Res_MinsMaxs, constant, min, max); break;
case 8:
finalMinMax<8 > << < Grid_dim, Block_dim, smemSize >> > (Res_MinsMaxs, constant, min, max); break;
case 4:
finalMinMax<4 > << < Grid_dim, Block_dim, smemSize >> > (Res_MinsMaxs, constant, min, max); break;
case 2:
finalMinMax<2 > << < Grid_dim, Block_dim, smemSize >> > (Res_MinsMaxs, constant, min, max); break;
case 1:
finalMinMax<1 > << < Grid_dim, Block_dim, smemSize >> > (Res_MinsMaxs, constant, min, max); break;
}
//These are basic calls there is one thread for one pixel and each thread substract min and multiply with the constant
Block_dim = dim3(THREAD_COUNT, 1, 1);
Grid_dim = dim3(LENA_SIZE / THREAD_COUNT, 1, 1);
calcPixelVal<< < Grid_dim, Block_dim >> > (Lena_d, constant, min);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float et;
hipEventElapsedTime(&et, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
// Retrieve result from device and store it in host array
hipMemcpy(Lena_Original_h, Lena_d, vector_size, hipMemcpyDeviceToHost);
WritePGM("lena_after.pgm", Lena_Original_h, nWidth, nHeight, nMaxGray);
free(Lena_Original_h);
hipFree(Lena_d);
hipFree(Res_MinsMaxs);
hipFree(min);
hipFree(max);
hipFree(constant);
printf("GPU execution time= %f ms\n", et);
}
//For reading .pgm file
float* LoadPGM(char * sFileName, int & nWidth, int & nHeight, int & nMaxGray)
{
char aLine[256];
FILE * fInput = fopen(sFileName, "r");
if (fInput == 0)
{
perror("Cannot open file to read");
exit(EXIT_FAILURE);
}
// First line: version
fgets(aLine, 256, fInput);
std::cout << "\tVersion: " << aLine;
// Second line: comment
fgets(aLine, 256, fInput);
std::cout << "\tComment: " << aLine;
fseek(fInput, -1, SEEK_CUR);
// Third line: size
fscanf(fInput, "%d", &nWidth);
std::cout << "\tWidth: " << nWidth;
fscanf(fInput, "%d", &nHeight);
std::cout << " Height: " << nHeight << std::endl;
// Fourth line: max value
fscanf(fInput, "%d", &nMaxGray);
std::cout << "\tMax value: " << nMaxGray << std::endl;
while (getc(fInput) != '\n');
// Following lines: data
float * pSrc_Host = new float[nWidth * nHeight];
for (int i = 0; i < nHeight; ++i)
for (int j = 0; j < nWidth; ++j)
pSrc_Host[i*nWidth + j] = fgetc(fInput);
fclose(fInput);
return pSrc_Host;
}
//For writing .pgm file
void WritePGM(char * sFileName, float * pDst_Host, int nWidth, int nHeight, int nMaxGray)
{
FILE * fOutput = fopen(sFileName, "w+");
if (fOutput == 0)
{
perror("Cannot open file to read");
exit(EXIT_FAILURE);
}
const char * aComment = "# Created by NPP";
fprintf(fOutput, "P5\n%s\n%d %d\n%d\n", aComment, nWidth, nHeight, nMaxGray);
for (int i = 0; i < nHeight; ++i)
for (int j = 0; j < nWidth; ++j)
fputc(pDst_Host[i*nWidth + j], fOutput);
fclose(fOutput);
} | 897015aeedbfe568c5a7f9643de705d19010e59d.cu | #include <iostream>
#include <string.h>
#include <cuda.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "math.h"
#include "time.h"
#include <fstream>
#include <iomanip>
#include <sstream>
#include <cstdlib>
#include <cstdio>
#include <float.h>
#include <curand_mtgp32_kernel.h>
//Defined in order to test different block and grid dimensions
// THREAD_COUNT and GRID_DIM must be power of 2
#define THREAD_COUNT 1024
#define GRID_DIM 32
using namespace std;
float* LoadPGM(char * sFileName, int & nWidth, int & nHeight, int & nMaxGray);
void WritePGM(char * sFileName, float * pDst_Host, int nWidth, int nHeight, int nMaxGray);
/*
* Three kernels are called in the following order:
* findMinMax
* finalMinMax
* calcPixelVal
*
* findMinMax finds mins&maxes for each block
* finalMinMax finds 1 min and 1 max from previous output
* calcPixelVal calculates new pixels value according to min&max
*/
int main(void)
{
float *Lena_Original_h, *Lena_d, *Res_MinsMaxs; // Pointer to host & device arrays
float *min, *max, *constant; //For keeping min, max and a constant value which holds (255/(max-min))
size_t vector_size; //total size of lena
vector_size = LENA_SIZE * sizeof(float);
int nWidth, nHeight, nMaxGray;
Lena_Original_h= LoadPGM("lena_before.pgm", nWidth, nHeight, nMaxGray);
//allocate space for variables on device
cudaMalloc((void **)&min, sizeof(float));
cudaMalloc((void **)&max, sizeof(float));
cudaMalloc((void **)&constant, sizeof(float));
cudaMalloc((void **)&Lena_d, vector_size); // Allocate array on device for Lena
cudaMemcpy(Lena_d, Lena_Original_h, vector_size, cudaMemcpyHostToDevice); // copy values to device
//Block dimension is directly from THREAD_COUNT
dim3 Block_dim(THREAD_COUNT, 1, 1);
//Grid dim will start from GRID_DIM
//Not used dependent values in order to test different dimensions
dim3 Grid_dim(GRID_DIM, 1, 1);
//For shared memory size. x2 for keeping minimums and maximums in one array
int smemSize = sizeof(float) * THREAD_COUNT * 2;
//For keeping minimums and maximums found in each block
// x2 for for keeping minimums and maximums in one array
cudaMalloc((void **)&Res_MinsMaxs, sizeof(float)*Grid_dim.x * 2);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
//kernel calls for findings mins&maxes for each block
switch (THREAD_COUNT)
{
case 1024:
findMinMax<1024> << < Grid_dim, Block_dim, smemSize >> > (Lena_d, Res_MinsMaxs); break;
case 512:
findMinMax<512 > << < Grid_dim, Block_dim, smemSize >> > (Lena_d, Res_MinsMaxs); break;
case 256:
findMinMax<256 > << < Grid_dim, Block_dim, smemSize >> > (Lena_d, Res_MinsMaxs); break;
case 128:
findMinMax<128 > << < Grid_dim, Block_dim, smemSize >> > (Lena_d, Res_MinsMaxs); break;
case 64:
findMinMax<64 > << < Grid_dim, Block_dim, smemSize >> > (Lena_d, Res_MinsMaxs); break;
case 32:
findMinMax<32 > << < Grid_dim, Block_dim, smemSize >> > (Lena_d, Res_MinsMaxs); break;
case 16:
findMinMax<16 > << < Grid_dim, Block_dim, smemSize >> > (Lena_d, Res_MinsMaxs); break;
case 8:
findMinMax<8 > << < Grid_dim, Block_dim, smemSize >> > (Lena_d, Res_MinsMaxs); break;
case 4:
findMinMax<4 > << < Grid_dim, Block_dim, smemSize >> > (Lena_d, Res_MinsMaxs); break;
case 2:
findMinMax<2 > << < Grid_dim, Block_dim, smemSize >> > (Lena_d, Res_MinsMaxs); break;
case 1:
findMinMax<1 > <<< Grid_dim, Block_dim, smemSize >> > (Lena_d, Res_MinsMaxs); break;
}
//From the kernels above, Grid_dim*2 min and max values will be produced.
//each block produces a min&max value
//while taking values from global memory, each thread takes multiple elements
//so shared memory size for all max&min values => sizeof(float)*(Grid_dim*2)/2
smemSize = sizeof(float) * Grid_dim.x;
//because each thread takes two elements, block_dim will be half of Grid_dim
Block_dim = dim3((Grid_dim.x) / 2, 1, 1);
//only 1 block in order two get just 1 min&max
//Also previous grid_dim will not be more than 1024, so there will not be too many elements
Grid_dim = dim3(1, 1, 1);
//kernel calls for finding final min&max values
switch (Block_dim.x)
{
case 1024:
finalMinMax<1024> << < Grid_dim, Block_dim, smemSize >> > (Res_MinsMaxs, constant, min, max); break;
case 512:
finalMinMax<512 > << < Grid_dim, Block_dim, smemSize >> > (Res_MinsMaxs, constant, min, max); break;
case 256:
finalMinMax<256 > << < Grid_dim, Block_dim, smemSize >> > (Res_MinsMaxs, constant, min, max); break;
case 128:
finalMinMax<128 > << < Grid_dim, Block_dim, smemSize >> > (Res_MinsMaxs, constant, min, max); break;
case 64:
finalMinMax<64 > << < Grid_dim, Block_dim, smemSize >> > (Res_MinsMaxs, constant, min, max); break;
case 32:
finalMinMax<32 > << < Grid_dim, Block_dim, smemSize >> > (Res_MinsMaxs, constant, min, max); break;
case 16:
finalMinMax<16 > << < Grid_dim, Block_dim, smemSize >> > (Res_MinsMaxs, constant, min, max); break;
case 8:
finalMinMax<8 > << < Grid_dim, Block_dim, smemSize >> > (Res_MinsMaxs, constant, min, max); break;
case 4:
finalMinMax<4 > << < Grid_dim, Block_dim, smemSize >> > (Res_MinsMaxs, constant, min, max); break;
case 2:
finalMinMax<2 > << < Grid_dim, Block_dim, smemSize >> > (Res_MinsMaxs, constant, min, max); break;
case 1:
finalMinMax<1 > << < Grid_dim, Block_dim, smemSize >> > (Res_MinsMaxs, constant, min, max); break;
}
//These are basic calls there is one thread for one pixel and each thread substract min and multiply with the constant
Block_dim = dim3(THREAD_COUNT, 1, 1);
Grid_dim = dim3(LENA_SIZE / THREAD_COUNT, 1, 1);
calcPixelVal<< < Grid_dim, Block_dim >> > (Lena_d, constant, min);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float et;
cudaEventElapsedTime(&et, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
// Retrieve result from device and store it in host array
cudaMemcpy(Lena_Original_h, Lena_d, vector_size, cudaMemcpyDeviceToHost);
WritePGM("lena_after.pgm", Lena_Original_h, nWidth, nHeight, nMaxGray);
free(Lena_Original_h);
cudaFree(Lena_d);
cudaFree(Res_MinsMaxs);
cudaFree(min);
cudaFree(max);
cudaFree(constant);
printf("GPU execution time= %f ms\n", et);
}
//For reading .pgm file
float* LoadPGM(char * sFileName, int & nWidth, int & nHeight, int & nMaxGray)
{
char aLine[256];
FILE * fInput = fopen(sFileName, "r");
if (fInput == 0)
{
perror("Cannot open file to read");
exit(EXIT_FAILURE);
}
// First line: version
fgets(aLine, 256, fInput);
std::cout << "\tVersion: " << aLine;
// Second line: comment
fgets(aLine, 256, fInput);
std::cout << "\tComment: " << aLine;
fseek(fInput, -1, SEEK_CUR);
// Third line: size
fscanf(fInput, "%d", &nWidth);
std::cout << "\tWidth: " << nWidth;
fscanf(fInput, "%d", &nHeight);
std::cout << " Height: " << nHeight << std::endl;
// Fourth line: max value
fscanf(fInput, "%d", &nMaxGray);
std::cout << "\tMax value: " << nMaxGray << std::endl;
while (getc(fInput) != '\n');
// Following lines: data
float * pSrc_Host = new float[nWidth * nHeight];
for (int i = 0; i < nHeight; ++i)
for (int j = 0; j < nWidth; ++j)
pSrc_Host[i*nWidth + j] = fgetc(fInput);
fclose(fInput);
return pSrc_Host;
}
//For writing .pgm file
void WritePGM(char * sFileName, float * pDst_Host, int nWidth, int nHeight, int nMaxGray)
{
FILE * fOutput = fopen(sFileName, "w+");
if (fOutput == 0)
{
perror("Cannot open file to read");
exit(EXIT_FAILURE);
}
const char * aComment = "# Created by NPP";
fprintf(fOutput, "P5\n%s\n%d %d\n%d\n", aComment, nWidth, nHeight, nMaxGray);
for (int i = 0; i < nHeight; ++i)
for (int j = 0; j < nWidth; ++j)
fputc(pDst_Host[i*nWidth + j], fOutput);
fclose(fOutput);
} |
8861f0adbd213fd79997c64049483a0a884ab360.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
All modification made by Cambricon Corporation: 2018-2019 Cambricon Corporation
All rights reserved.
All other contributions:
Copyright (c) 2014--2019, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "caffe/util/math_functions.hpp"
namespace caffe {
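// Element-wise RMSProp update implemented by the kernel below:
//   h[i] <- rms_decay * h[i] + (1 - rms_decay) * g[i]^2
//   g[i] <- local_rate * g[i] / (sqrt(h[i]) + delta)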
template <typename Dtype>
__global__ void RMSPropUpdate(int N, Dtype* g, Dtype* h,
Dtype rms_decay, Dtype delta, Dtype local_rate) {
CUDA_KERNEL_LOOP(i, N) {
float gi = g[i];
float hi = h[i] = rms_decay*h[i] + (1-rms_decay)*gi*gi;
g[i] = local_rate * g[i] / (sqrt(hi) + delta);
}
}
template <typename Dtype>
void rmsprop_update_gpu(int N, Dtype* g, Dtype* h, Dtype rms_decay,
Dtype delta, Dtype local_rate) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(HIP_KERNEL_NAME(RMSPropUpdate<Dtype>),
dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, g, h, rms_decay, delta, local_rate);
CUDA_POST_KERNEL_CHECK;
}
template void rmsprop_update_gpu<float>(int, float*, float*, float, float,
float);
template void rmsprop_update_gpu<double>(int, double*, double*, double, double,
double);
} // namespace caffe
| 8861f0adbd213fd79997c64049483a0a884ab360.cu | /*
All modification made by Cambricon Corporation: © 2018-2019 Cambricon Corporation
All rights reserved.
All other contributions:
Copyright (c) 2014--2019, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void RMSPropUpdate(int N, Dtype* g, Dtype* h,
Dtype rms_decay, Dtype delta, Dtype local_rate) {
CUDA_KERNEL_LOOP(i, N) {
float gi = g[i];
float hi = h[i] = rms_decay*h[i] + (1-rms_decay)*gi*gi;
g[i] = local_rate * g[i] / (sqrt(hi) + delta);
}
}
template <typename Dtype>
void rmsprop_update_gpu(int N, Dtype* g, Dtype* h, Dtype rms_decay,
Dtype delta, Dtype local_rate) {
RMSPropUpdate<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, g, h, rms_decay, delta, local_rate);
CUDA_POST_KERNEL_CHECK;
}
template void rmsprop_update_gpu<float>(int, float*, float*, float, float,
float);
template void rmsprop_update_gpu<double>(int, double*, double*, double, double,
double);
} // namespace caffe
|
428686cf67e071f00101cafc020f946f9e030668.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//nvcc -ptx test.cu -ccbin "F:Visual Studio\VC\Tools\MSVC\14.12.25827\bin\Hostx64\x64"
#include "hiprand/hiprand_kernel.h"
__device__ void EM1( double *x,
double *y,
const int parNum) {
int globalBlockIndex = blockIdx.x + blockIdx.y * gridDim.x;
int localThreadIdx = threadIdx.x + blockDim.x * threadIdx.y;
int threadsPerBlock = blockDim.x*blockDim.y;
int n = localThreadIdx + globalBlockIndex*threadsPerBlock;
if ( n >= parNum ){
return;
}
hiprandState_t state;
hiprand_init((unsigned long long)clock(),0,n, & state);
x[n]=hiprand_uniform_double(&state);
y[n]=hiprand_normal(&state);
}
__global__ void processMandelbrotElement(
double *x,
double *y,
const int parNum) {
EM1(x,y,parNum);
} | 428686cf67e071f00101cafc020f946f9e030668.cu | //nvcc -ptx test.cu -ccbin "F:Visual Studio\VC\Tools\MSVC\14.12.25827\bin\Hostx64\x64"
#include "curand_kernel.h"
__device__ void EM1( double *x,
double *y,
const int parNum) {
int globalBlockIndex = blockIdx.x + blockIdx.y * gridDim.x;
int localThreadIdx = threadIdx.x + blockDim.x * threadIdx.y;
int threadsPerBlock = blockDim.x*blockDim.y;
int n = localThreadIdx + globalBlockIndex*threadsPerBlock;
if ( n >= parNum ){
return;
}
curandState state;
curand_init((unsigned long long)clock(),0,n, & state);
x[n]=curand_uniform_double(&state);
y[n]=curand_normal(&state);
}
__global__ void processMandelbrotElement(
double *x,
double *y,
const int parNum) {
EM1(x,y,parNum);
} |
7a9ffed5b78a888e3dbfd08a73cd02628a39f5c6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void reduce_v1(float* in,float* out, int n){
int tx = threadIdx.x;
int bx = blockIdx.x;
int BX = blockDim.x; //same as THEAD_MAX
int i = bx*BX+tx;
__shared__ float S[THEAD_MAX];
S[tx] = i < n ? in[i] : 0;
__syncthreads();
for(int s=1; s<BX ;s*=2){
int index = 2*s*tx;
if(index < BX)
S[index] += S[index+s];
__syncthreads();
}
if(tx==0)
out[bx] = S[0];
} | 7a9ffed5b78a888e3dbfd08a73cd02628a39f5c6.cu | #include "includes.h"
__global__ void reduce_v1(float* in,float* out, int n){
int tx = threadIdx.x;
int bx = blockIdx.x;
int BX = blockDim.x; //same as THEAD_MAX
int i = bx*BX+tx;
__shared__ float S[THEAD_MAX];
S[tx] = i < n ? in[i] : 0;
__syncthreads();
for(int s=1; s<BX ;s*=2){
int index = 2*s*tx;
if(index < BX)
S[index] += S[index+s];
__syncthreads();
}
if(tx==0)
out[bx] = S[0];
} |
bb99f1cfc3ca18cc74bd5793b4a2761e9942b1ad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/balanceable_sigmoid_cross_entropy_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
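// BSCE_scale_GPU re-weights each element of the sigmoid cross-entropy gradient: when b_his_ is
// set it is scaled by a histogram-based factor indexed with the magnitude of the gradient
// (|diff| binned into bin_num_ bins), and when b_cls_ is set it is additionally scaled by a
// per-class factor looked up with the target label.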
template <typename Dtype>
__global__ void BSCE_scale_GPU(const int nthreads,
Dtype* bottom_diff, const Dtype* his_stat_, const Dtype* cls_grad_, const Dtype* target, int bin_num_, bool b_cls_, bool b_his_){
CUDA_KERNEL_LOOP(i, nthreads) {
if ( b_his_ )
bottom_diff[ i ] = bottom_diff[ i ] * his_stat_[ (int)floorf(fabs(bottom_diff[ i ]) * bin_num_) ];
if ( b_cls_ )
bottom_diff[ i ] = bottom_diff[ i ] * cls_grad_[ (int)target[ i ] ]; //nthreads / 2.0 / (cls_stat_[ (int)target[ i ] ]);
}
}
template <typename Dtype>
void BSCELossLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
// First, compute the diff
if ( bottom[ 0 ]->count() < 1 || valid_num_ < 1 ){
return;
}
const int count = bottom[0]->count();
const Dtype* sigmoid_output_data = sigmoid_output_->gpu_data();
const Dtype* target = bottom[1]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
caffe_copy(count, sigmoid_output_data, bottom_diff);
caffe_gpu_axpy(count, Dtype(-1), target, bottom_diff);
BSCE_statistics(bottom[ 0 ]->mutable_cpu_diff(), count, bottom[ 1 ]->cpu_data());
// Scale down gradient
const Dtype loss_weight = top[ 0 ]->cpu_diff()[ 0 ];
BSCE_scale_GPU<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >( count, bottom[ 0 ]->mutable_gpu_diff(), his_stat_.gpu_data(), cls_grad_.gpu_data(), target, bin_num_, b_cls_, b_his_ );
CUDA_POST_KERNEL_CHECK;
caffe_gpu_scal(count, loss_weight / valid_num_, bottom[ 0 ]->mutable_gpu_diff());
}
}
INSTANTIATE_LAYER_GPU_BACKWARD(BSCELossLayer);
} // namespace caffe
| bb99f1cfc3ca18cc74bd5793b4a2761e9942b1ad.cu | #include <vector>
#include "caffe/layers/balanceable_sigmoid_cross_entropy_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void BSCE_scale_GPU(const int nthreads,
Dtype* bottom_diff, const Dtype* his_stat_, const Dtype* cls_grad_, const Dtype* target, int bin_num_, bool b_cls_, bool b_his_){
CUDA_KERNEL_LOOP(i, nthreads) {
if ( b_his_ )
bottom_diff[ i ] = bottom_diff[ i ] * his_stat_[ (int)floorf(fabs(bottom_diff[ i ]) * bin_num_) ];
if ( b_cls_ )
bottom_diff[ i ] = bottom_diff[ i ] * cls_grad_[ (int)target[ i ] ]; //nthreads / 2.0 / (cls_stat_[ (int)target[ i ] ]);
}
}
template <typename Dtype>
void BSCELossLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
// First, compute the diff
if ( bottom[ 0 ]->count() < 1 || valid_num_ < 1 ){
return;
}
const int count = bottom[0]->count();
const Dtype* sigmoid_output_data = sigmoid_output_->gpu_data();
const Dtype* target = bottom[1]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
caffe_copy(count, sigmoid_output_data, bottom_diff);
caffe_gpu_axpy(count, Dtype(-1), target, bottom_diff);
BSCE_statistics(bottom[ 0 ]->mutable_cpu_diff(), count, bottom[ 1 ]->cpu_data());
// Scale down gradient
const Dtype loss_weight = top[ 0 ]->cpu_diff()[ 0 ];
BSCE_scale_GPU<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >( count, bottom[ 0 ]->mutable_gpu_diff(), his_stat_.gpu_data(), cls_grad_.gpu_data(), target, bin_num_, b_cls_, b_his_ );
CUDA_POST_KERNEL_CHECK;
caffe_gpu_scal(count, loss_weight / valid_num_, bottom[ 0 ]->mutable_gpu_diff());
}
}
INSTANTIATE_LAYER_GPU_BACKWARD(BSCELossLayer);
} // namespace caffe
|