Column          Type      Length (min – max)
hip_filename    string    5 – 84
hip_content     string    79 – 9.69M
cuda_filename   string    4 – 83
cuda_content    string    19 – 9.69M
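Each row pairs a hipified source file (hip_filename, hip_content) with its CUDA original (cuda_filename, cuda_content); within a pair the two files differ mainly in the runtime API names and headers that hipify rewrote. A minimal standalone sketch of that correspondence is below. It is not taken from the dataset; it only uses HIP runtime calls that appear in the rows that follow, with the matching CUDA names (as used in the .cu versions) noted in comments.

#include "hip/hip_runtime.h"   // header hipify inserts in translated files (see the AddVector pair below)
#include <iostream>

int main() {
  hipDeviceProp_t props;                                  // cudaDeviceProp in the .cu version
  hipError_t error = hipGetDeviceProperties(&props, 0);   // cudaGetDeviceProperties in the .cu version
  if (error != hipSuccess) {                              // cudaSuccess in the .cu version
    std::cerr << "hipGetDeviceProperties() returned an error: "
              << hipGetErrorString(error) << std::endl;   // cudaGetErrorString in the .cu version
    return -1;
  }
  std::cout << "Device 0 compute capability: " << props.major << "." << props.minor << std::endl;
  return 0;
}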
8d75e33f979b10629ba0b3de013383ea775c41ae.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are permitted * provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** Please check example 07, 08 and 17 for the basics of dense tensor op gemm kernels. NVIDIA Ampere architecture also supports structured sparse tensor op for tf32, fp16, int8 and int4. Sparse GEMM kernels needs to takes an additional E matrix which stores the meta data. The format of meta data is different for every data types. CUTLASS templates can automatically infer it based on input A and B. Check code below. Moreover, matrix E needs to be preprocessed so that it can use ldmatrix to load into the registers efficiently. */ #include <iostream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm_sparse.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/host/gemm.h" #include "cutlass/util/host_reorder.h" #include "cutlass/util/host_uncompress.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" // The code section below describes datatype for input, output matrices and computation between // elements in input matrices. using ElementAccumulator = int32_t; // <- data type of accumulator using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations using ElementInputA = cutlass::int4b_t; // <- data type of elements in input matrix A using ElementInputB = cutlass::int4b_t; // <- data type of elements in input matrix B using ElementOutput = int32_t; // <- data type of elements in output matrix D // The code section below describes matrix layout of input and output matrices. 
Row Major for // Matrix A, Column Major for Matrix B and Row Major for Matrix C using LayoutInputA = cutlass::layout::RowMajor; using LayoutInputB = cutlass::layout::ColumnMajor; using LayoutOutput = cutlass::layout::RowMajor; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm80; // This code section describes the tile size a thread block will compute using ShapeMMAThreadBlock = cutlass::gemm::GemmShape<128, 128, 256>; // <- threadblock tile M = 128, N = 128, K = 256 // This code section describes tile size a warp will compute using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 256>; // <- warp tile M = 64, N = 64, K = 256 // This code section describes the size of MMA op using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 128>; // <- MMA Op tile M = 16, N = 8, K = 128 // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // <- ?? // This code section describes the epilogue part of the kernel using EpilogueOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, // <- data type of output matrix 128 / cutlass::sizeof_bits<ElementOutput>::value, // <- the number of elements per vectorized // memory access. For a byte, it's 16 // elements. This becomes the vector width of // math instructions in the epilogue too ElementAccumulator, // <- data type of accumulator ElementComputeEpilogue>; // <- data type for alpha/beta in linear combination function // Number of pipelines you want to use constexpr int NumStages = 3; using Gemm = cutlass::gemm::device::SparseGemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ShapeMMAThreadBlock, ShapeMMAWarp, ShapeMMAOp, EpilogueOp, SwizzleThreadBlock, NumStages>; // Data type and layout of meta data matrix E can be inferred from template Gemm. 
using ElementInputE = typename Gemm::ElementE; using LayoutInputE = cutlass::layout::RowMajor; using ReorderedLayoutInputE = typename Gemm::LayoutE; // Blow property is defined in include/cutlass/arch/sp_mma_sm80.h // 50% Sparsity on Ampere constexpr int kSparse = Gemm::kSparse; // How many elements of A are covered per ElementE constexpr int kElementsPerElementE = Gemm::kElementsPerElementE; // The size of individual meta data constexpr int kMetaSizeInBits = Gemm::kMetaSizeInBits; int run() { const int length_m = 512; const int length_n = 512; const int length_k = 1024; // Create a tuple of problem size for matrix multiplication cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k); // Initialize tensors using CUTLASS helper functions cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a( cutlass::make_Coord(problem_size.m(), problem_size.k() / kSparse)); // <- Create matrix A with dimensions M x (K / 2) cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a_uncompressed( problem_size.mk()); // <- Create uncompressed matrix A with dimensions M x K for reference computing cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b( problem_size.kn()); // <- Create matrix B with dimensions K x N cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c( problem_size.mn()); // <- Create matrix C with dimensions M x N cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // CUTLASS kernel cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // reference kernel // Create matrix E with dimensions M x (K / 2 / kElementsPerElementE). This one is used by reference computing. cutlass::HostTensor<ElementInputE, LayoutInputE> tensor_e( cutlass::make_Coord(problem_size.m(), problem_size.k() / kSparse / kElementsPerElementE)); // Same size as the above. The above one needs to be reordered and stored in this one. cutlass::HostTensor<ElementInputE, ReorderedLayoutInputE> tensor_e_reordered( cutlass::make_Coord(problem_size.m(), problem_size.k() / kSparse / kElementsPerElementE)); // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomUniform( tensor_a.host_view(), 1, ElementInputA(2), ElementInputA(-2), 0); // <- Fill matrix A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b.host_view(), 1, ElementInputB(2), ElementInputB(-2), 0); // <- Fill matrix B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_c.host_view(), 1, ElementOutput(2), ElementOutput(-2), 0); // <- Fill matrix C on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomSparseMeta( tensor_e.host_view(), 1, kMetaSizeInBits); // <- Fill matrix E on host with uniform-distribution random meta data cutlass::reference::host::TensorFill( tensor_d.host_view()); // <- fill matrix D on host with zeros cutlass::reference::host::TensorFill( tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros // Reorder the meta data matrix so that we can use ldmatrix to load them to tensor core // instructions. 
cutlass::reorder_meta(tensor_e_reordered.host_ref(), tensor_e.host_ref(), {problem_size.m(), problem_size.n(), problem_size.k() / kSparse / kElementsPerElementE}); // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c.sync_device(); tensor_d.sync_device(); tensor_e_reordered.sync_device(); tensor_ref_d.sync_device(); // Initialize alpha and beta for dot product computation ElementComputeEpilogue alpha = ElementComputeEpilogue(1); ElementComputeEpilogue beta = ElementComputeEpilogue(0); // Split K dimension into 1 partitions int split_k_slices = 1; // Create a tuple of gemm kernel arguments. This is later passed as arguments to launch // instantiated CUTLASS kernel typename Gemm::Arguments arguments{problem_size, // <- problem size of matrix multiplication tensor_a.device_ref(), // <- reference to matrix A on device tensor_b.device_ref(), // <- reference to matrix B on device tensor_c.device_ref(), // <- reference to matrix C on device tensor_d.device_ref(), // <- reference to matrix D on device tensor_e_reordered.device_ref(), // <- reference to matrix E on device {alpha, beta}, // <- tuple of alpha and beta split_k_slices}; // <- k-dimension split factor // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size = Gemm::get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); // Instantiate CUTLASS kernel depending on templates Gemm gemm_op; // Check the problem size is supported or not cutlass::Status status = gemm_op.can_implement(arguments); CUTLASS_CHECK(status); // Initialize CUTLASS kernel with arguments and workspace pointer status = gemm_op.initialize(arguments, workspace.get()); CUTLASS_CHECK(status); // Launch initialized CUTLASS kernel status = gemm_op(); CUTLASS_CHECK(status); // uncompress tensor_a based on meta data tensor_e. We need it for reference computing. cutlass::uncompress(tensor_a_uncompressed.host_ref(), tensor_a.host_ref(), tensor_e.host_ref(), problem_size.m(), problem_size.k()); // Create instantiation for host reference gemm kernel cutlass::reference::host::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementComputeEpilogue, typename Gemm::Operator> gemm_host; // Launch host reference gemm kernel gemm_host(problem_size, alpha, tensor_a_uncompressed.host_ref(), tensor_b.host_ref(), beta, tensor_c.host_ref(), tensor_ref_d.host_ref()); // Copy output data from CUTLASS host for comparison tensor_d.sync_host(); // Check if output from CUTLASS kernel and reference kernel are equal or not bool passed = cutlass::reference::host::TensorEquals( tensor_d.host_view(), tensor_ref_d.host_view()); std::cout << (passed ? "Passed" : "Failed") << std::endl; return (passed ? 0 : -1); } int main() { bool notSupported = false; // Ampere Sparse Tensor Core operations exposed with mma.sync and ldmatrix are first available // in CUDA 11.1. // // CUTLASS must be compiled with CUDA 11.1 Toolkit to run these examples. if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 1))) { std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.1 Toolkit or later." 
<< std::endl; notSupported = true; } hipDeviceProp_t props; hipError_t error = hipGetDeviceProperties(&props, 0); if (error != hipSuccess) { std::cerr << "hipGetDeviceProperties() returned an error: " << hipGetErrorString(error) << std::endl; return -1; } if (props.major * 10 + props.minor < 80) { std::cerr << "Ampere Tensor Core operations must be run on a machine with compute capability at least 80." << std::endl; notSupported = true; } if (notSupported) { // Returning zero so this test passes on older Toolkits. Its actions are no-op. return 0; } return run(); }
8d75e33f979b10629ba0b3de013383ea775c41ae.cu
/*************************************************************************************************** * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are permitted * provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** Please check example 07, 08 and 17 for the basics of dense tensor op gemm kernels. NVIDIA Ampere architecture also supports structured sparse tensor op for tf32, fp16, int8 and int4. Sparse GEMM kernels needs to takes an additional E matrix which stores the meta data. The format of meta data is different for every data types. CUTLASS templates can automatically infer it based on input A and B. Check code below. Moreover, matrix E needs to be preprocessed so that it can use ldmatrix to load into the registers efficiently. */ #include <iostream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm_sparse.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/host/gemm.h" #include "cutlass/util/host_reorder.h" #include "cutlass/util/host_uncompress.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" // The code section below describes datatype for input, output matrices and computation between // elements in input matrices. using ElementAccumulator = int32_t; // <- data type of accumulator using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations using ElementInputA = cutlass::int4b_t; // <- data type of elements in input matrix A using ElementInputB = cutlass::int4b_t; // <- data type of elements in input matrix B using ElementOutput = int32_t; // <- data type of elements in output matrix D // The code section below describes matrix layout of input and output matrices. 
Row Major for // Matrix A, Column Major for Matrix B and Row Major for Matrix C using LayoutInputA = cutlass::layout::RowMajor; using LayoutInputB = cutlass::layout::ColumnMajor; using LayoutOutput = cutlass::layout::RowMajor; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm80; // This code section describes the tile size a thread block will compute using ShapeMMAThreadBlock = cutlass::gemm::GemmShape<128, 128, 256>; // <- threadblock tile M = 128, N = 128, K = 256 // This code section describes tile size a warp will compute using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 256>; // <- warp tile M = 64, N = 64, K = 256 // This code section describes the size of MMA op using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 128>; // <- MMA Op tile M = 16, N = 8, K = 128 // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // <- ?? // This code section describes the epilogue part of the kernel using EpilogueOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, // <- data type of output matrix 128 / cutlass::sizeof_bits<ElementOutput>::value, // <- the number of elements per vectorized // memory access. For a byte, it's 16 // elements. This becomes the vector width of // math instructions in the epilogue too ElementAccumulator, // <- data type of accumulator ElementComputeEpilogue>; // <- data type for alpha/beta in linear combination function // Number of pipelines you want to use constexpr int NumStages = 3; using Gemm = cutlass::gemm::device::SparseGemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ShapeMMAThreadBlock, ShapeMMAWarp, ShapeMMAOp, EpilogueOp, SwizzleThreadBlock, NumStages>; // Data type and layout of meta data matrix E can be inferred from template Gemm. 
using ElementInputE = typename Gemm::ElementE; using LayoutInputE = cutlass::layout::RowMajor; using ReorderedLayoutInputE = typename Gemm::LayoutE; // Blow property is defined in include/cutlass/arch/sp_mma_sm80.h // 50% Sparsity on Ampere constexpr int kSparse = Gemm::kSparse; // How many elements of A are covered per ElementE constexpr int kElementsPerElementE = Gemm::kElementsPerElementE; // The size of individual meta data constexpr int kMetaSizeInBits = Gemm::kMetaSizeInBits; int run() { const int length_m = 512; const int length_n = 512; const int length_k = 1024; // Create a tuple of problem size for matrix multiplication cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k); // Initialize tensors using CUTLASS helper functions cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a( cutlass::make_Coord(problem_size.m(), problem_size.k() / kSparse)); // <- Create matrix A with dimensions M x (K / 2) cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a_uncompressed( problem_size.mk()); // <- Create uncompressed matrix A with dimensions M x K for reference computing cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b( problem_size.kn()); // <- Create matrix B with dimensions K x N cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c( problem_size.mn()); // <- Create matrix C with dimensions M x N cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // CUTLASS kernel cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // reference kernel // Create matrix E with dimensions M x (K / 2 / kElementsPerElementE). This one is used by reference computing. cutlass::HostTensor<ElementInputE, LayoutInputE> tensor_e( cutlass::make_Coord(problem_size.m(), problem_size.k() / kSparse / kElementsPerElementE)); // Same size as the above. The above one needs to be reordered and stored in this one. cutlass::HostTensor<ElementInputE, ReorderedLayoutInputE> tensor_e_reordered( cutlass::make_Coord(problem_size.m(), problem_size.k() / kSparse / kElementsPerElementE)); // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomUniform( tensor_a.host_view(), 1, ElementInputA(2), ElementInputA(-2), 0); // <- Fill matrix A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b.host_view(), 1, ElementInputB(2), ElementInputB(-2), 0); // <- Fill matrix B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_c.host_view(), 1, ElementOutput(2), ElementOutput(-2), 0); // <- Fill matrix C on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomSparseMeta( tensor_e.host_view(), 1, kMetaSizeInBits); // <- Fill matrix E on host with uniform-distribution random meta data cutlass::reference::host::TensorFill( tensor_d.host_view()); // <- fill matrix D on host with zeros cutlass::reference::host::TensorFill( tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros // Reorder the meta data matrix so that we can use ldmatrix to load them to tensor core // instructions. 
cutlass::reorder_meta(tensor_e_reordered.host_ref(), tensor_e.host_ref(), {problem_size.m(), problem_size.n(), problem_size.k() / kSparse / kElementsPerElementE}); // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c.sync_device(); tensor_d.sync_device(); tensor_e_reordered.sync_device(); tensor_ref_d.sync_device(); // Initialize alpha and beta for dot product computation ElementComputeEpilogue alpha = ElementComputeEpilogue(1); ElementComputeEpilogue beta = ElementComputeEpilogue(0); // Split K dimension into 1 partitions int split_k_slices = 1; // Create a tuple of gemm kernel arguments. This is later passed as arguments to launch // instantiated CUTLASS kernel typename Gemm::Arguments arguments{problem_size, // <- problem size of matrix multiplication tensor_a.device_ref(), // <- reference to matrix A on device tensor_b.device_ref(), // <- reference to matrix B on device tensor_c.device_ref(), // <- reference to matrix C on device tensor_d.device_ref(), // <- reference to matrix D on device tensor_e_reordered.device_ref(), // <- reference to matrix E on device {alpha, beta}, // <- tuple of alpha and beta split_k_slices}; // <- k-dimension split factor // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size = Gemm::get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); // Instantiate CUTLASS kernel depending on templates Gemm gemm_op; // Check the problem size is supported or not cutlass::Status status = gemm_op.can_implement(arguments); CUTLASS_CHECK(status); // Initialize CUTLASS kernel with arguments and workspace pointer status = gemm_op.initialize(arguments, workspace.get()); CUTLASS_CHECK(status); // Launch initialized CUTLASS kernel status = gemm_op(); CUTLASS_CHECK(status); // uncompress tensor_a based on meta data tensor_e. We need it for reference computing. cutlass::uncompress(tensor_a_uncompressed.host_ref(), tensor_a.host_ref(), tensor_e.host_ref(), problem_size.m(), problem_size.k()); // Create instantiation for host reference gemm kernel cutlass::reference::host::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementComputeEpilogue, typename Gemm::Operator> gemm_host; // Launch host reference gemm kernel gemm_host(problem_size, alpha, tensor_a_uncompressed.host_ref(), tensor_b.host_ref(), beta, tensor_c.host_ref(), tensor_ref_d.host_ref()); // Copy output data from CUTLASS host for comparison tensor_d.sync_host(); // Check if output from CUTLASS kernel and reference kernel are equal or not bool passed = cutlass::reference::host::TensorEquals( tensor_d.host_view(), tensor_ref_d.host_view()); std::cout << (passed ? "Passed" : "Failed") << std::endl; return (passed ? 0 : -1); } int main() { bool notSupported = false; // Ampere Sparse Tensor Core operations exposed with mma.sync and ldmatrix are first available // in CUDA 11.1. // // CUTLASS must be compiled with CUDA 11.1 Toolkit to run these examples. if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 1))) { std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.1 Toolkit or later." 
<< std::endl; notSupported = true; } cudaDeviceProp props; cudaError_t error = cudaGetDeviceProperties(&props, 0); if (error != cudaSuccess) { std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl; return -1; } if (props.major * 10 + props.minor < 80) { std::cerr << "Ampere Tensor Core operations must be run on a machine with compute capability at least 80." << std::endl; notSupported = true; } if (notSupported) { // Returning zero so this test passes on older Toolkits. Its actions are no-op. return 0; } return run(); }
ff8483fc62305e4a56fb27408387eefe118f6df3.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

// snippet_begin:mtapi_cuda_c_kernel
extern "C" __global__ void AddVector(
    void* arguments, int arguments_size,
    void* result_buffer, int result_buffer_size,
    void* node_local_data, int node_local_data_size) {
  int ii = blockDim.x * blockIdx.x + threadIdx.x;
  int elements = arguments_size / sizeof(float) / 2;
  if (ii >= elements) return;

  float* a = (float*)arguments;
  float* b = ((float*)arguments) + elements;
  float* c = (float*)result_buffer;
  float* d = (float*)node_local_data;

  c[ii] = a[ii] + b[ii] + d[0];
}
// snippet_end
ff8483fc62305e4a56fb27408387eefe118f6df3.cu
// snippet_begin:mtapi_cuda_c_kernel
extern "C" __global__ void AddVector(
    void* arguments, int arguments_size,
    void* result_buffer, int result_buffer_size,
    void* node_local_data, int node_local_data_size) {
  int ii = blockDim.x * blockIdx.x + threadIdx.x;
  int elements = arguments_size / sizeof(float) / 2;
  if (ii >= elements) return;

  float* a = (float*)arguments;
  float* b = ((float*)arguments) + elements;
  float* c = (float*)result_buffer;
  float* d = (float*)node_local_data;

  c[ii] = a[ii] + b[ii] + d[0];
}
// snippet_end
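The AddVector pair above packs both input vectors into a single arguments buffer and adds a scalar taken from node_local_data. The host-side launch sketch below is not part of the dataset row; the buffer sizes and launch shape are illustrative assumptions, and the kernel body is copied from the row so the sketch compiles on its own.

#include "hip/hip_runtime.h"
#include <cstdio>
#include <vector>

// Kernel copied verbatim from the pair above.
extern "C" __global__ void AddVector(
    void* arguments, int arguments_size,
    void* result_buffer, int result_buffer_size,
    void* node_local_data, int node_local_data_size) {
  int ii = blockDim.x * blockIdx.x + threadIdx.x;
  int elements = arguments_size / sizeof(float) / 2;
  if (ii >= elements) return;
  float* a = (float*)arguments;
  float* b = ((float*)arguments) + elements;
  float* c = (float*)result_buffer;
  float* d = (float*)node_local_data;
  c[ii] = a[ii] + b[ii] + d[0];
}

int main() {
  const int elements = 1024;                        // illustrative size
  std::vector<float> h_args(2 * elements, 1.0f);    // a followed by b in one packed buffer
  float h_node = 0.5f;                              // node-local scalar added to every element

  const int args_bytes   = static_cast<int>(h_args.size() * sizeof(float));
  const int result_bytes = static_cast<int>(elements * sizeof(float));
  const int node_bytes   = static_cast<int>(sizeof(float));

  void *d_args, *d_result, *d_node;
  hipMalloc(&d_args, args_bytes);
  hipMalloc(&d_result, result_bytes);
  hipMalloc(&d_node, node_bytes);
  hipMemcpy(d_args, h_args.data(), args_bytes, hipMemcpyHostToDevice);
  hipMemcpy(d_node, &h_node, node_bytes, hipMemcpyHostToDevice);

  const int block = 256;
  const int grid = (elements + block - 1) / block;
  hipLaunchKernelGGL(AddVector, dim3(grid), dim3(block), 0, 0,
                     d_args, args_bytes,
                     d_result, result_bytes,
                     d_node, node_bytes);

  std::vector<float> h_result(elements, 0.0f);
  hipMemcpy(h_result.data(), d_result, result_bytes, hipMemcpyDeviceToHost);
  std::printf("c[0] = %f (expected 2.5)\n", h_result[0]);  // 1.0 + 1.0 + 0.5

  hipFree(d_args);
  hipFree(d_result);
  hipFree(d_node);
  return 0;
}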
4a9cd9e49dc1da5d487f234edb80c2528e17a717.hip
// !!! This is a file automatically generated by hipify!!! #include <fstream> #include <iostream> #include <chrono> #include <cusolverDn.h> #include <rocblas.h> #include <cusp/array1d.h> #include <cusp/array2d.h> #include <cusp/convert.h> #include <cusp/elementwise.h> #include <cusp/functional.h> #include <cusp/multiply.h> #include <cusp/print.h> #include <cusp/transpose.h> #include <thrust/copy.h> #include <thrust/reduce.h> #include <thrust/sequence.h> #include <thrust/transform.h> #include <thrust/tabulate.h> #include <thrust/execution_policy.h> //#include "multShare.h" using namespace std; typedef float type; typedef cusp::array1d<type, cusp::device_memory> Array1d; typedef cusp::array2d<type, cusp::device_memory> Array2d; // convert a linear index to a row index template <typename T> struct linear_index_to_row_index : public thrust::unary_function<T,T> { T C; // number of columns __host__ __device__ linear_index_to_row_index(T C) : C(C) {} __host__ __device__ T operator()(T i) { return i / C; } }; template <typename T> struct reciprocal_my : public thrust::unary_function<T,T> { T value; reciprocal_my(T thr) : value(thr) {}; __host__ __device__ T operator()(const T& v) const { return sqrt(T(value) / v); } }; template<typename T> struct is_true: thrust::unary_function<T, T> { T col; is_true(T _c) : col(_c) { } ; __host__ __device__ bool operator()(const T &x) { return (x % col) != 0; } }; /* template<typename T> struct sub_matrix: thrust::unary_function<T, T> { T col; T row; T pitch; sub_matrix(T _c, T _r, T _p) : col(_c), row(_r), pitch(_p) { }; __host__ __device__ bool operator()(const T &x) { return (x % (int)pitch) < (int) col && (x / (int)pitch) < (int)row; } }; */ /* template<typename T> struct is_diagonal: thrust::unary_function<T, T> { T col; is_diagonal(T _c) : col(_c) { } ; __host__ __device__ bool operator()(const T &x) { return (x % (col + 1)) == 0; } }; */ template <typename T> struct column_by_vector : public thrust::unary_function<T,T> { T* data; T col; column_by_vector(T *_data, T _col) : data(_data), col(_col) {}; __host__ __device__ T operator()(const thrust::tuple<int,type>& v) { return data[thrust::get<0>(v) % (int)col] * thrust::get<1>(v); } }; bool svd(int M, cusp::array2d<type, cusp::device_memory>& M_denseHM, cusp::array2d<type, cusp::device_memory>& U, cusp::array1d<type, cusp::device_memory>& S) { thrust::device_ptr<type> dev_ptr = &(M_denseHM.values[0]); type *M_raw_ptr = thrust::raw_pointer_cast(dev_ptr); int work_size = 0; int *devInfo; hipMalloc(&devInfo, sizeof(int)); type *d_U; hipMalloc(&d_U, M * M * sizeof(type)); type *d_V; hipMalloc(&d_V, M * M * sizeof(type)); type *d_S; hipMalloc(&d_S, M * sizeof(type)); //cusolverStatus_t stat; hipsolverDnHandle_t solver_handle; hipsolverDnCreate(&solver_handle); hipsolverDnSgesvd_bufferSize(solver_handle, M, M, &work_size); type *work; hipMalloc(&work, work_size * sizeof(type)); hipsolverDnSgesvd(solver_handle, 'A', 'A', M, M, M_raw_ptr, M, d_S, d_U, M, d_V, M, work, work_size, NULL, devInfo); int devInfo_h = 0; hipMemcpy(&devInfo_h, devInfo, sizeof(int), hipMemcpyDeviceToHost); thrust::device_ptr<type> dev_ptr_U(d_U); thrust::copy(thrust::device, dev_ptr_U, dev_ptr_U + (M * M), U.values.begin()); thrust::device_ptr<type> dev_ptr_S(d_S); thrust::copy(thrust::device, dev_ptr_S, dev_ptr_S + M, S.begin()); hipsolverDnDestroy(solver_handle); return 1; } int getmultiple16sizeMatriz(int n){ if (n % 16) n = n + (16 - n % 16); return n; } /* void multiply(Array2d & A, Array2d& B, Array2d& C){ // Load A and B to device 
memory Array2d A_new(getmultiple16sizeMatriz(A.num_rows), getmultiple16sizeMatriz(A.num_cols)); cusp::array1d<int,cusp::device_memory> index(A_new.num_rows*A_new.num_cols); thrust::sequence(index.begin(), index.end(),0); A_new(0,0) = 0; A_new(0,1) = 1; A_new(0,2) = 2; A_new(1,0) = 3; A_new(1,1) = 4; A_new(1,2) = 5; A_new(2,0) = 6; A_new(2,1) = 7; A_new(2,2) = 8; A 1 2 3 4 A_new 1 2 0 3 4 0 0 0 0 cusp::print(A_new); Matrix d_A; d_A.width = d_A.stride = A_new.num_cols; d_A.height = A_new.num_rows; thrust::device_ptr<float> dev_ptr_A = &(A_new.values[0]); d_A.elements = thrust::raw_pointer_cast(dev_ptr_A); Array2d B_new(getmultiple16sizeMatriz(B.num_rows), getmultiple16sizeMatriz(B.num_cols)); B_new(0,0) = 0; B_new(0,1) = 1; B_new(0,2) = 2; B_new(1,0) = 3; B_new(1,1) = 4; B_new(1,2) = 5; B_new(2,0) = 6; B_new(2,1) = 7; B_new(2,2) = 8; cusp::print(B_new); Matrix d_B; d_B.width = d_B.stride = B_new.num_cols; d_B.height = B_new.num_rows; thrust::device_ptr<float> dev_ptr_B = &(B_new.values[0]); d_B.elements = thrust::raw_pointer_cast(dev_ptr_B); Array2d C_new(getmultiple16sizeMatriz(C.num_rows), getmultiple16sizeMatriz(C.num_cols)); // Allocate C in device memory Matrix d_C; d_C.width = d_C.stride = C_new.num_cols; d_C.height = C_new.num_rows; auto size = d_C.width * d_C.height * sizeof(float); auto err = hipMalloc(&d_C.elements, size); printf("CUDA malloc C: %s\n",hipGetErrorString(err)); MatMul(d_A,d_B,d_C); thrust::device_ptr<type> dev_ptr_S(d_C.elements); thrust::copy(thrust::device, dev_ptr_S, dev_ptr_S + (d_C.width*d_C.height), C_new.values.begin()); cusp::array1d<int,cusp::device_memory> index_(C_new.num_rows*C_new.num_cols); thrust::sequence(index_.begin(), index_.end(),0); thrust::copy_if( dev_ptr_S, dev_ptr_S + (d_C.width*d_C.height), index_.begin(), C.values.begin(), sub_matrix<int>(C.num_rows,C.num_cols,C_new.num_cols)); hipFree(d_C.elements); cusp::print(C); } */ int main(int argc, char *argv[]) { if (argc != 2) { cout << "Argumento incorreto. 
" << endl; cout << "parmetro entrada: ./main <base de dados> " << endl; return 1; } auto start = std::chrono::high_resolution_clock::now(); //******************** ler base de dados ************************* std::string strFile = argv[1]; std::string strPrefixFile = strFile.substr(strFile.find_last_of("/")+1,strFile.find(".dat")-4); ifstream file; file.open(strFile); if(!file.is_open()){ std::cout << "Error opening file" << std::endl; return 1; } int intNrLinhas = 0; int intNrColunas = 0; std::string strLinha; std::string strNumero; std::vector<type> database; while (getline(file, strLinha)) { stringstream ssLinha(strLinha); while (getline(ssLinha, strNumero, ' ')) { database.push_back(type(std::stoi(strNumero))); if ( intNrLinhas == 0 ){ intNrColunas++; } } intNrLinhas++; } file.close(); //std::cout << "linha " << intNrLinhas << " colunas " << intNrColunas << std::endl; //hipSetDevice(0); // selecionar placa de video Tesla K40c Array2d E(intNrLinhas, intNrColunas); thrust::copy(database.begin(), database.end(), E.values.begin()); //teste //std::cout << "E:" << std::endl; //cusp::print(E); //******************** E*-2 ************************* thrust::transform(E.values.begin(), E.values.end(), E.values.begin(), cusp::multiplies_value<int>(-2)); //teste //std::cout << "E*-2:" << std::endl; //cusp::print(E); //******************** E*col+1 ************************* thrust::transform(E.values.begin(), E.values.end(), E.values.begin(), cusp::plus_value<int>(E.num_cols + 1)); //teste //std::cout << "E*col+1:" << std::endl; //cusp::print(E); //********************************************* Array2d matrizTransposta; cusp::transpose(E, matrizTransposta); //teste //std::cout << "E Transposta:" << std::endl; //cusp::print(matrizTransposta); Array2d C(E.num_cols, E.num_cols); //multiply(matrizTransposta, E, C); //testemultiply //cusp::multiply(matrizTransposta, E, C); //std::cout << "C:" << std::endl; //cusp::print(C); hipblasHandle_t h; hipblasCreate(&h); float alpha = 1.0f; float beta = 0.0f; //hipblasSgemm(h, HIPBLAS_OP_N, HIPBLAS_OP_N, B.num_cols, A.num_rows, A.num_cols, &alpha, thrust::raw_pointer_cast(B.values.data()), B.num_cols, thrust::raw_pointer_cast(A.values.data()), A.num_cols, &beta, thrust::raw_pointer_cast(C.values.data()), B.num_cols); hipblasSgemm(h, HIPBLAS_OP_N, HIPBLAS_OP_N, E.num_cols, matrizTransposta.num_rows, matrizTransposta.num_cols, &alpha, thrust::raw_pointer_cast(E.values.data()), E.num_cols, thrust::raw_pointer_cast(matrizTransposta.values.data()), matrizTransposta.num_cols, &beta, thrust::raw_pointer_cast(C.values.data()), E.num_cols); //teste //std::cout << "C:" << std::endl; //cusp::print(C); thrust::transform(C.values.begin(), C.values.end(), C.values.begin(), cusp::divide_value<type>( E.num_cols * E.num_rows * (E.num_cols - 1) * (E.num_cols - 1))); //teste //std::cout << "C_div:" << std::endl; //cusp::print(C); //******************** Inicio da Decomposicao ************************* cusp::array2d<type, cusp::device_memory> U(C.num_cols, C.num_cols); cusp::array1d<type, cusp::device_memory> S(C.num_cols); //std::cout << "Inicio da Decomposicao" << std::endl; svd(C.num_cols, C, U, S); //teste //std::cout << "U:" << std::endl; //cusp::print(U); //teste //std::cout << "S:" << std::endl; //cusp::print(S); /* // Teste U(0, 0) = 0.445644; U(0, 1) = -0.12139; U(0, 2) = -0.00336875; U(0, 3) = -0.617952; U(0, 4) = 0.30292; U(0, 5) = 0.104751; U(0, 6) = 0.161157; U(0, 7) = -0.19879; U(0, 8) = 0.486383; U(1, 0) = -0.711218; U(1, 1) = 0.0423734; U(1, 2) = 0.0552392; U(1, 3) = 
-0.441138; U(1, 4) = 0.0339571; U(1, 5) = 0.0783436; U(1, 6) = -0.00482367; U(1, 7) = 0.470035; U(1, 8) = 0.257823; U(2, 0) = 0.31457; U(2, 1) = -0.0603901; U(2, 2) = -0.0471155; U(2, 3) = 0.0886266; U(2, 4) = -0.100349; U(2, 5) = 0.76635; U(2, 6) = 0.103892; U(2, 7) = 0.515145; U(2, 8) = -0.117446; U(3, 0) = 0.282895; U(3, 1) = 0.608804; U(3, 2) = -0.263212; U(3, 3) = -0.367443; U(3, 4) = 0.0698713; U(3, 5) = -0.262026; U(3, 6) = -0.210852; U(3, 7) = 0.299143; U(3, 8) = -0.370876; U(4, 0) = -0.244688; U(4, 1) = 0.279298; U(4, 2) = -0.70117; U(4, 3) = -0.0279127; U(4, 4) = -0.212571; U(4, 5) = 0.328237; U(4, 6) = 0.175306; U(4, 7) = -0.423009; U(4, 8) = 0.0843047; U(5, 0) = -0.12212; U(5, 1) = 0.253274; U(5, 2) = 0.37022; U(5, 3) = -0.0908042; U(5, 4) = 0.170505; U(5, 5) = 0.452362; U(5, 6) = -0.623452; U(5, 7) = -0.380869; U(5, 8) = -0.0904064; U(6, 0) = 0.189107; U(6, 1) = 0.291328; U(6, 2) = 0.222269; U(6, 3) = 0.0265378; U(6, 4) = -0.744001; U(6, 5) = -0.0819397; U(6, 6) = -0.158411; U(6, 7) = 0.0552435; U(6, 8) = 0.490776; U(7, 0) = -0.00735859; U(7, 1) = -0.496706; U(7, 2) = -0.0182501; U(7, 3) = -0.500389; U(7, 4) = -0.506722; U(7, 5) = -0.000417037; U(7, 6) = -0.120787; U(7, 7) = -0.147379; U(7, 8) = -0.457635; U(8, 0) = -0.0773703; U(8, 1) = 0.368016; U(8, 2) = 0.496989; U(8, 3) = -0.143339; U(8, 4) = -0.0870132; U(8, 5) = 0.0886237; U(8, 6) = 0.677971; U(8, 7) = -0.189519; U(8, 8) = -0.282924; // Teste S[0] = -5.05182e-017; S[1] = -8.22593e-018; S[2] = -4.1709e-018; S[3] = 1.18595e-018; S[4] = 7.85299e-018; S[5] = 1.23348e-017; S[6] = 0.0396248; S[7] = 0.152448; S[8] = 0.224594; */ /* // Teste U(0, 0) = -0.486383 ;U(0, 1) = 0.19879; U(0, 2) = -0.161157; U(0, 3) = -0.104751; U(0, 4) = -0.30292; U(0, 5) = 0.617952; U(0, 6) = 0.00336875; U(0, 7) = 0.12139; U(1, 0) = -0.257823 ;U(1, 1) = -0.470035; U(1, 2) = 0.00482367; U(1, 3) = -0.0783436; U(1, 4) = -0.0339571; U(1, 5) = 0.441138; U(1, 6) = -0.0552392; U(1, 7) = -0.0423734; U(2, 0) = 0.117446 ;U(2, 1) = -0.515145; U(2, 2) = -0.103892; U(2, 3) = -0.76635; U(2, 4) = 0.100349; U(2, 5) = -0.0886266; U(2, 6) = 0.0471155; U(2, 7) = 0.0603901; U(3, 0) = 0.370876 ;U(3, 1) = -0.299143; U(3, 2) = 0.210852; U(3, 3) = 0.262026; U(3, 4) = -0.0698713; U(3, 5) = 0.367443; U(3, 6) = 0.263212; U(3, 7) = -0.608804; U(4, 0) = -0.0843047 ;U(4, 1) = 0.423009; U(4, 2) = -0.175306; U(4, 3) = -0.328237; U(4, 4) = 0.212571; U(4, 5) = 0.0279127; U(4, 6) = 0.70117; U(4, 7) = -0.279298; U(5, 0) = 0.0904064 ;U(5, 1) = 0.380869; U(5, 2) = 0.623452; U(5, 3) = -0.452362; U(5, 4) = -0.170505; U(5, 5) = 0.0908042; U(5, 6) = -0.37022; U(5, 7) = -0.253274; U(6, 0) = -0.490776 ;U(6, 1) = -0.0552435; U(6, 2) = 0.158411; U(6, 3) = 0.0819397; U(6, 4) = 0.744001; U(6, 5) = -0.0265378; U(6, 6) = -0.222269; U(6, 7) = -0.291328; U(7, 0) = 0.457635 ;U(7, 1) = 0.147379; U(7, 2) = 0.120787; U(7, 3) = 0.000417037; U(7, 4) = 0.506722; U(7, 5) = 0.500389; U(7, 6) = 0.0182501; U(7, 7) = 0.496706; U(8, 0) = 0.282924 ;U(8, 1) = 0.189519; U(8, 2) = -0.677971; U(8, 3) = -0.0886237; U(8, 4) = 0.0870132; U(8, 5) = 0.143339; U(8, 6) = -0.496989; U(8, 7) = -0.368016; // Teste S[7] =-8.22593e-018; S[6] = -4.1709e-018; S[5] = 1.18595e-018; S[4] = 7.85299e-018; S[3] = 1.23348e-017; S[2] = 0.0396248; S[1] = 0.152448; S[0] = 0.224594; */ //teste //std::cout << "U:" << std::endl; //cusp::print(U); //teste //std::cout << "S:" << std::endl; //cusp::print(S); //******************** Inicio da Decomposicao ************************* //Array1d rho(E.num_cols - 1); Array1d rho(E.num_cols - 1); 
thrust::transform(S.begin(), S.end()-1, rho.begin(), cusp::sqrt_functor<type>()); //thrust::transform(S.rbegin(), S.rend() -1, rho.begin(), // cusp::sqrt_functor<type>()); //std::cout << "Rho:" << std::endl; //cusp::print(rho); //********************************************* Array2d x(E.num_cols, E.num_cols - 1); cusp::array1d<int,cusp::device_memory> index(intNrColunas*intNrColunas); thrust::sequence(index.begin(), index.end(), 1); //std::cout << "index:" << std::endl; //cusp::print(index); thrust::copy_if(U.values.begin(), U.values.end(), index.begin(), x.values.begin(), is_true<int>(intNrColunas)); //thrust::transform(x.values.begin(), x.values.end(), // x.values.begin(), cusp::multiplies_value<float>(-1)); //std::cout << "X:" << std::endl; //cusp::print(x); //Teste /* x(0, 0) = -0.486383; x(0, 1) = 0.19879; x(0, 2) = -0.161157; x(0, 3) = -0.104751; x(0, 4) = -0.30292; x(0, 5) = 0.617952; x(0, 6) = 0.00336875; x(0, 7) = 0.12139; x(1, 0) = -0.257823; x(1, 1) = -0.470035; x(1, 2) = 0.00482367; x(1, 3) = -0.0783436; x(1, 4) = -0.0339571; x(1, 5) = 0.441138; x(1, 6) = -0.0552392; x(1, 7) = -0.0423734; x(2, 0) = 0.117446; x(2, 1) = -0.515145; x(2, 2) = -0.103892; x(2, 3) = -0.76635; x(2, 4) = 0.100349; x(2, 5) = -0.0886266; x(2, 6) = 0.0471155; x(2, 7) = 0.0603901; x(3, 0) = 0.370876; x(3, 1) = -0.299143; x(3, 2) = 0.210852; x(3, 3) = 0.262026; x(3, 4) = -0.0698713; x(3, 5) = 0.367443; x(3, 6) = 0.263212; x(3, 7) = -0.608804; x(4, 0) = -0.0843047; x(4, 1) = 0.423009; x(4, 2) = -0.175306; x(4, 3) = -0.328237; x(4, 4) = 0.212571; x(4, 5) = 0.0279127; x(4, 6) = 0.70117; x(4, 7) = -0.279298; x(5, 0) = 0.0904064; x(5, 1) = 0.380869; x(5, 2) = 0.623452; x(5, 3) = -0.452362; x(5, 4) = -0.170505; x(5, 5) = 0.0908042; x(5, 6) = -0.37022; x(5, 7) = -0.253274; x(6, 0) = -0.490776; x(6, 1) = -0.0552435; x(6, 2) = 0.158411; x(6, 3) = 0.0819397; x(6, 4) = 0.744001; x(6, 5) = -0.0265378; x(6, 6) = -0.222269; x(6, 7) = -0.291328; x(7, 0) = 0.457635; x(7, 1) = 0.147379; x(7, 2) = 0.120787; x(7, 3) = 0.000417037; x(7, 4) = 0.506722; x(7, 5) = 0.500389; x(7, 6) = 0.0182501; x(7, 7) = 0.496706; x(8, 0) = 0.282924; x(8, 1) = 0.189519; x(8, 2) = -0.677971; x(8, 3) = -0.0886237; x(8, 4) = 0.0870132; x(8, 5) = 0.143339; x(8, 6) = -0.496989; x(8, 7) = -0.368016; */ //std::cout << "X:" << std::endl; //cusp::print(x); //********************************************* Array2d x_sqr(x.num_rows, x.num_cols); thrust::transform(x.values.begin(), x.values.end(), x_sqr.values.begin(), cusp::square_functor<type>()); //std::cout << "x_sqr:" << std::endl; //cusp::print(x_sqr); int ft = intNrLinhas * intNrColunas * (intNrColunas - 1); //std::cout << "ft value = " << ft << std::endl; //********************************************* Array2d T = x_sqr; thrust::transform(T.values.begin(), T.values.end(), T.values.begin(), cusp::multiplies_value<type>(intNrLinhas * (intNrColunas - 1))); //std::cout << "T:" << std::endl; //cusp::print(T); //********************************************* cusp::array1d<int,cusp::device_memory> index_sum_t(intNrColunas); cusp::array1d<type,cusp::device_memory> marginal_sum_t(intNrColunas-1); // ? 
cusp::array2d<type,cusp::device_memory> T_T(intNrColunas,intNrLinhas); cusp::transpose(T, T_T); //std::cout << "T Transposta:" << std::endl; //cusp::print(T_T); Array1d index_similar(intNrColunas*(intNrColunas - 1)); thrust::tabulate(thrust::device, index_similar.begin(), index_similar.end(), linear_index_to_row_index<int>(intNrColunas)); //cusp::print(index_similar); thrust::reduce_by_key(index_similar.begin(), index_similar.end(), T_T.values.begin(), index_sum_t.begin(), marginal_sum_t.begin(), thrust::equal_to<int>(), thrust::plus<type>()); //std::cout << "cc:" << std::endl; //cusp::print(marginal_sum_t); cusp::array1d<type,cusp::device_memory> cc(intNrColunas-1); thrust::transform(marginal_sum_t.begin(), marginal_sum_t.end(), cc.begin(), reciprocal_my<type>(type(ft))); //std::cout << "cc:" << std::endl; //cusp::print(cc); //********************************************* Array1d index_X(intNrColunas*(intNrColunas - 1)); thrust::sequence(index_X.begin(), index_X.end(),0); cusp::array2d<type,cusp::device_memory> x_normed(x.num_rows,x.num_cols); thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(index_X.begin(), x.values.begin())), thrust::make_zip_iterator(thrust::make_tuple(index_X.end(), x.values.end())), x_normed.values.begin(), column_by_vector<type>(thrust::raw_pointer_cast(cc.data()),(type)x.num_cols)); //std::cout << "x_normed:" << std::endl; //cusp::print(x_normed); //************************************** cusp::array2d<type,cusp::device_memory> x_project(x.num_rows,x.num_cols); thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(index_X.begin(), x_normed.values.begin())), thrust::make_zip_iterator(thrust::make_tuple(index_X.end(), x_normed.values.end())), x_project.values.begin(), column_by_vector<type>(thrust::raw_pointer_cast(rho.data()),(type)x.num_cols)); //std::cout << "x_project:" << std::endl; //cusp::print(x_project); //*************************** GPU para Memria principal ********************** cusp::array2d<type,cusp::host_memory> out(x_project); auto finish = std::chrono::high_resolution_clock::now(); std::chrono::duration<double, std::ratio<1>> elapsed_seconds = finish - start; auto time = elapsed_seconds.count(); //std::cout << "x_project:" << std::endl; //cusp::print(x_project); std::ofstream myfile; //cout << strPrefixFile << endl; myfile.open ("./output/" + strPrefixFile + "_dominance_tempoProcessamentoGPU.txt"); myfile << "Nome arquivo: " << strPrefixFile << std::endl; myfile << "Matriz formato: [" << E.num_rows << "," << E.num_cols << "]" << std::endl; myfile << "Tempo calcular em GPU (segundos): " << time << std::endl; myfile << "Matriz Xproject: [" << x_project.num_rows << "," << x_project.num_cols << "]" << std::endl; myfile.close(); std::cout << "Finished with success!" << std::endl; //*/ return 0; }
4a9cd9e49dc1da5d487f234edb80c2528e17a717.cu
#include <fstream> #include <iostream> #include <chrono> #include <cusolverDn.h> #include <cublas_v2.h> #include <cusp/array1d.h> #include <cusp/array2d.h> #include <cusp/convert.h> #include <cusp/elementwise.h> #include <cusp/functional.h> #include <cusp/multiply.h> #include <cusp/print.h> #include <cusp/transpose.h> #include <thrust/copy.h> #include <thrust/reduce.h> #include <thrust/sequence.h> #include <thrust/transform.h> #include <thrust/tabulate.h> #include <thrust/execution_policy.h> //#include "multShare.h" using namespace std; typedef float type; typedef cusp::array1d<type, cusp::device_memory> Array1d; typedef cusp::array2d<type, cusp::device_memory> Array2d; // convert a linear index to a row index template <typename T> struct linear_index_to_row_index : public thrust::unary_function<T,T> { T C; // number of columns __host__ __device__ linear_index_to_row_index(T C) : C(C) {} __host__ __device__ T operator()(T i) { return i / C; } }; template <typename T> struct reciprocal_my : public thrust::unary_function<T,T> { T value; reciprocal_my(T thr) : value(thr) {}; __host__ __device__ T operator()(const T& v) const { return sqrt(T(value) / v); } }; template<typename T> struct is_true: thrust::unary_function<T, T> { T col; is_true(T _c) : col(_c) { } ; __host__ __device__ bool operator()(const T &x) { return (x % col) != 0; } }; /* template<typename T> struct sub_matrix: thrust::unary_function<T, T> { T col; T row; T pitch; sub_matrix(T _c, T _r, T _p) : col(_c), row(_r), pitch(_p) { }; __host__ __device__ bool operator()(const T &x) { return (x % (int)pitch) < (int) col && (x / (int)pitch) < (int)row; } }; */ /* template<typename T> struct is_diagonal: thrust::unary_function<T, T> { T col; is_diagonal(T _c) : col(_c) { } ; __host__ __device__ bool operator()(const T &x) { return (x % (col + 1)) == 0; } }; */ template <typename T> struct column_by_vector : public thrust::unary_function<T,T> { T* data; T col; column_by_vector(T *_data, T _col) : data(_data), col(_col) {}; __host__ __device__ T operator()(const thrust::tuple<int,type>& v) { return data[thrust::get<0>(v) % (int)col] * thrust::get<1>(v); } }; bool svd(int M, cusp::array2d<type, cusp::device_memory>& M_denseHM, cusp::array2d<type, cusp::device_memory>& U, cusp::array1d<type, cusp::device_memory>& S) { thrust::device_ptr<type> dev_ptr = &(M_denseHM.values[0]); type *M_raw_ptr = thrust::raw_pointer_cast(dev_ptr); int work_size = 0; int *devInfo; cudaMalloc(&devInfo, sizeof(int)); type *d_U; cudaMalloc(&d_U, M * M * sizeof(type)); type *d_V; cudaMalloc(&d_V, M * M * sizeof(type)); type *d_S; cudaMalloc(&d_S, M * sizeof(type)); //cusolverStatus_t stat; cusolverDnHandle_t solver_handle; cusolverDnCreate(&solver_handle); cusolverDnSgesvd_bufferSize(solver_handle, M, M, &work_size); type *work; cudaMalloc(&work, work_size * sizeof(type)); cusolverDnSgesvd(solver_handle, 'A', 'A', M, M, M_raw_ptr, M, d_S, d_U, M, d_V, M, work, work_size, NULL, devInfo); int devInfo_h = 0; cudaMemcpy(&devInfo_h, devInfo, sizeof(int), cudaMemcpyDeviceToHost); thrust::device_ptr<type> dev_ptr_U(d_U); thrust::copy(thrust::device, dev_ptr_U, dev_ptr_U + (M * M), U.values.begin()); thrust::device_ptr<type> dev_ptr_S(d_S); thrust::copy(thrust::device, dev_ptr_S, dev_ptr_S + M, S.begin()); cusolverDnDestroy(solver_handle); return 1; } int getmultiple16sizeMatriz(int n){ if (n % 16) n = n + (16 - n % 16); return n; } /* void multiply(Array2d & A, Array2d& B, Array2d& C){ // Load A and B to device memory Array2d A_new(getmultiple16sizeMatriz(A.num_rows), 
getmultiple16sizeMatriz(A.num_cols)); cusp::array1d<int,cusp::device_memory> index(A_new.num_rows*A_new.num_cols); thrust::sequence(index.begin(), index.end(),0); A_new(0,0) = 0; A_new(0,1) = 1; A_new(0,2) = 2; A_new(1,0) = 3; A_new(1,1) = 4; A_new(1,2) = 5; A_new(2,0) = 6; A_new(2,1) = 7; A_new(2,2) = 8; A 1 2 3 4 A_new 1 2 0 3 4 0 0 0 0 cusp::print(A_new); Matrix d_A; d_A.width = d_A.stride = A_new.num_cols; d_A.height = A_new.num_rows; thrust::device_ptr<float> dev_ptr_A = &(A_new.values[0]); d_A.elements = thrust::raw_pointer_cast(dev_ptr_A); Array2d B_new(getmultiple16sizeMatriz(B.num_rows), getmultiple16sizeMatriz(B.num_cols)); B_new(0,0) = 0; B_new(0,1) = 1; B_new(0,2) = 2; B_new(1,0) = 3; B_new(1,1) = 4; B_new(1,2) = 5; B_new(2,0) = 6; B_new(2,1) = 7; B_new(2,2) = 8; cusp::print(B_new); Matrix d_B; d_B.width = d_B.stride = B_new.num_cols; d_B.height = B_new.num_rows; thrust::device_ptr<float> dev_ptr_B = &(B_new.values[0]); d_B.elements = thrust::raw_pointer_cast(dev_ptr_B); Array2d C_new(getmultiple16sizeMatriz(C.num_rows), getmultiple16sizeMatriz(C.num_cols)); // Allocate C in device memory Matrix d_C; d_C.width = d_C.stride = C_new.num_cols; d_C.height = C_new.num_rows; auto size = d_C.width * d_C.height * sizeof(float); auto err = cudaMalloc(&d_C.elements, size); printf("CUDA malloc C: %s\n",cudaGetErrorString(err)); MatMul(d_A,d_B,d_C); thrust::device_ptr<type> dev_ptr_S(d_C.elements); thrust::copy(thrust::device, dev_ptr_S, dev_ptr_S + (d_C.width*d_C.height), C_new.values.begin()); cusp::array1d<int,cusp::device_memory> index_(C_new.num_rows*C_new.num_cols); thrust::sequence(index_.begin(), index_.end(),0); thrust::copy_if( dev_ptr_S, dev_ptr_S + (d_C.width*d_C.height), index_.begin(), C.values.begin(), sub_matrix<int>(C.num_rows,C.num_cols,C_new.num_cols)); cudaFree(d_C.elements); cusp::print(C); } */ int main(int argc, char *argv[]) { if (argc != 2) { cout << "Argumento incorreto. 
" << endl; cout << "parâmetro entrada: ./main <base de dados> " << endl; return 1; } auto start = std::chrono::high_resolution_clock::now(); //******************** ler base de dados ************************* std::string strFile = argv[1]; std::string strPrefixFile = strFile.substr(strFile.find_last_of("/")+1,strFile.find(".dat")-4); ifstream file; file.open(strFile); if(!file.is_open()){ std::cout << "Error opening file" << std::endl; return 1; } int intNrLinhas = 0; int intNrColunas = 0; std::string strLinha; std::string strNumero; std::vector<type> database; while (getline(file, strLinha)) { stringstream ssLinha(strLinha); while (getline(ssLinha, strNumero, ' ')) { database.push_back(type(std::stoi(strNumero))); if ( intNrLinhas == 0 ){ intNrColunas++; } } intNrLinhas++; } file.close(); //std::cout << "linha " << intNrLinhas << " colunas " << intNrColunas << std::endl; //cudaSetDevice(0); // selecionar placa de video Tesla K40c Array2d E(intNrLinhas, intNrColunas); thrust::copy(database.begin(), database.end(), E.values.begin()); //teste //std::cout << "E:" << std::endl; //cusp::print(E); //******************** E*-2 ************************* thrust::transform(E.values.begin(), E.values.end(), E.values.begin(), cusp::multiplies_value<int>(-2)); //teste //std::cout << "E*-2:" << std::endl; //cusp::print(E); //******************** E*col+1 ************************* thrust::transform(E.values.begin(), E.values.end(), E.values.begin(), cusp::plus_value<int>(E.num_cols + 1)); //teste //std::cout << "E*col+1:" << std::endl; //cusp::print(E); //********************************************* Array2d matrizTransposta; cusp::transpose(E, matrizTransposta); //teste //std::cout << "E Transposta:" << std::endl; //cusp::print(matrizTransposta); Array2d C(E.num_cols, E.num_cols); //multiply(matrizTransposta, E, C); //testemultiply //cusp::multiply(matrizTransposta, E, C); //std::cout << "C:" << std::endl; //cusp::print(C); cublasHandle_t h; cublasCreate(&h); float alpha = 1.0f; float beta = 0.0f; //cublasSgemm(h, CUBLAS_OP_N, CUBLAS_OP_N, B.num_cols, A.num_rows, A.num_cols, &alpha, thrust::raw_pointer_cast(B.values.data()), B.num_cols, thrust::raw_pointer_cast(A.values.data()), A.num_cols, &beta, thrust::raw_pointer_cast(C.values.data()), B.num_cols); cublasSgemm(h, CUBLAS_OP_N, CUBLAS_OP_N, E.num_cols, matrizTransposta.num_rows, matrizTransposta.num_cols, &alpha, thrust::raw_pointer_cast(E.values.data()), E.num_cols, thrust::raw_pointer_cast(matrizTransposta.values.data()), matrizTransposta.num_cols, &beta, thrust::raw_pointer_cast(C.values.data()), E.num_cols); //teste //std::cout << "C:" << std::endl; //cusp::print(C); thrust::transform(C.values.begin(), C.values.end(), C.values.begin(), cusp::divide_value<type>( E.num_cols * E.num_rows * (E.num_cols - 1) * (E.num_cols - 1))); //teste //std::cout << "C_div:" << std::endl; //cusp::print(C); //******************** Inicio da Decomposicao ************************* cusp::array2d<type, cusp::device_memory> U(C.num_cols, C.num_cols); cusp::array1d<type, cusp::device_memory> S(C.num_cols); //std::cout << "Inicio da Decomposicao" << std::endl; svd(C.num_cols, C, U, S); //teste //std::cout << "U:" << std::endl; //cusp::print(U); //teste //std::cout << "S:" << std::endl; //cusp::print(S); /* // Teste U(0, 0) = 0.445644; U(0, 1) = -0.12139; U(0, 2) = -0.00336875; U(0, 3) = -0.617952; U(0, 4) = 0.30292; U(0, 5) = 0.104751; U(0, 6) = 0.161157; U(0, 7) = -0.19879; U(0, 8) = 0.486383; U(1, 0) = -0.711218; U(1, 1) = 0.0423734; U(1, 2) = 0.0552392; U(1, 3) = -0.441138; 
U(1, 4) = 0.0339571; U(1, 5) = 0.0783436; U(1, 6) = -0.00482367; U(1, 7) = 0.470035; U(1, 8) = 0.257823; U(2, 0) = 0.31457; U(2, 1) = -0.0603901; U(2, 2) = -0.0471155; U(2, 3) = 0.0886266; U(2, 4) = -0.100349; U(2, 5) = 0.76635; U(2, 6) = 0.103892; U(2, 7) = 0.515145; U(2, 8) = -0.117446; U(3, 0) = 0.282895; U(3, 1) = 0.608804; U(3, 2) = -0.263212; U(3, 3) = -0.367443; U(3, 4) = 0.0698713; U(3, 5) = -0.262026; U(3, 6) = -0.210852; U(3, 7) = 0.299143; U(3, 8) = -0.370876; U(4, 0) = -0.244688; U(4, 1) = 0.279298; U(4, 2) = -0.70117; U(4, 3) = -0.0279127; U(4, 4) = -0.212571; U(4, 5) = 0.328237; U(4, 6) = 0.175306; U(4, 7) = -0.423009; U(4, 8) = 0.0843047; U(5, 0) = -0.12212; U(5, 1) = 0.253274; U(5, 2) = 0.37022; U(5, 3) = -0.0908042; U(5, 4) = 0.170505; U(5, 5) = 0.452362; U(5, 6) = -0.623452; U(5, 7) = -0.380869; U(5, 8) = -0.0904064; U(6, 0) = 0.189107; U(6, 1) = 0.291328; U(6, 2) = 0.222269; U(6, 3) = 0.0265378; U(6, 4) = -0.744001; U(6, 5) = -0.0819397; U(6, 6) = -0.158411; U(6, 7) = 0.0552435; U(6, 8) = 0.490776; U(7, 0) = -0.00735859; U(7, 1) = -0.496706; U(7, 2) = -0.0182501; U(7, 3) = -0.500389; U(7, 4) = -0.506722; U(7, 5) = -0.000417037; U(7, 6) = -0.120787; U(7, 7) = -0.147379; U(7, 8) = -0.457635; U(8, 0) = -0.0773703; U(8, 1) = 0.368016; U(8, 2) = 0.496989; U(8, 3) = -0.143339; U(8, 4) = -0.0870132; U(8, 5) = 0.0886237; U(8, 6) = 0.677971; U(8, 7) = -0.189519; U(8, 8) = -0.282924; // Teste S[0] = -5.05182e-017; S[1] = -8.22593e-018; S[2] = -4.1709e-018; S[3] = 1.18595e-018; S[4] = 7.85299e-018; S[5] = 1.23348e-017; S[6] = 0.0396248; S[7] = 0.152448; S[8] = 0.224594; */ /* // Teste U(0, 0) = -0.486383 ;U(0, 1) = 0.19879; U(0, 2) = -0.161157; U(0, 3) = -0.104751; U(0, 4) = -0.30292; U(0, 5) = 0.617952; U(0, 6) = 0.00336875; U(0, 7) = 0.12139; U(1, 0) = -0.257823 ;U(1, 1) = -0.470035; U(1, 2) = 0.00482367; U(1, 3) = -0.0783436; U(1, 4) = -0.0339571; U(1, 5) = 0.441138; U(1, 6) = -0.0552392; U(1, 7) = -0.0423734; U(2, 0) = 0.117446 ;U(2, 1) = -0.515145; U(2, 2) = -0.103892; U(2, 3) = -0.76635; U(2, 4) = 0.100349; U(2, 5) = -0.0886266; U(2, 6) = 0.0471155; U(2, 7) = 0.0603901; U(3, 0) = 0.370876 ;U(3, 1) = -0.299143; U(3, 2) = 0.210852; U(3, 3) = 0.262026; U(3, 4) = -0.0698713; U(3, 5) = 0.367443; U(3, 6) = 0.263212; U(3, 7) = -0.608804; U(4, 0) = -0.0843047 ;U(4, 1) = 0.423009; U(4, 2) = -0.175306; U(4, 3) = -0.328237; U(4, 4) = 0.212571; U(4, 5) = 0.0279127; U(4, 6) = 0.70117; U(4, 7) = -0.279298; U(5, 0) = 0.0904064 ;U(5, 1) = 0.380869; U(5, 2) = 0.623452; U(5, 3) = -0.452362; U(5, 4) = -0.170505; U(5, 5) = 0.0908042; U(5, 6) = -0.37022; U(5, 7) = -0.253274; U(6, 0) = -0.490776 ;U(6, 1) = -0.0552435; U(6, 2) = 0.158411; U(6, 3) = 0.0819397; U(6, 4) = 0.744001; U(6, 5) = -0.0265378; U(6, 6) = -0.222269; U(6, 7) = -0.291328; U(7, 0) = 0.457635 ;U(7, 1) = 0.147379; U(7, 2) = 0.120787; U(7, 3) = 0.000417037; U(7, 4) = 0.506722; U(7, 5) = 0.500389; U(7, 6) = 0.0182501; U(7, 7) = 0.496706; U(8, 0) = 0.282924 ;U(8, 1) = 0.189519; U(8, 2) = -0.677971; U(8, 3) = -0.0886237; U(8, 4) = 0.0870132; U(8, 5) = 0.143339; U(8, 6) = -0.496989; U(8, 7) = -0.368016; // Teste S[7] =-8.22593e-018; S[6] = -4.1709e-018; S[5] = 1.18595e-018; S[4] = 7.85299e-018; S[3] = 1.23348e-017; S[2] = 0.0396248; S[1] = 0.152448; S[0] = 0.224594; */ //teste //std::cout << "U:" << std::endl; //cusp::print(U); //teste //std::cout << "S:" << std::endl; //cusp::print(S); //******************** Inicio da Decomposicao ************************* //Array1d rho(E.num_cols - 1); Array1d rho(E.num_cols - 1); 
thrust::transform(S.begin(), S.end()-1, rho.begin(), cusp::sqrt_functor<type>()); //thrust::transform(S.rbegin(), S.rend() -1, rho.begin(), // cusp::sqrt_functor<type>()); //std::cout << "Rho:" << std::endl; //cusp::print(rho); //********************************************* Array2d x(E.num_cols, E.num_cols - 1); cusp::array1d<int,cusp::device_memory> index(intNrColunas*intNrColunas); thrust::sequence(index.begin(), index.end(), 1); //std::cout << "index:" << std::endl; //cusp::print(index); thrust::copy_if(U.values.begin(), U.values.end(), index.begin(), x.values.begin(), is_true<int>(intNrColunas)); //thrust::transform(x.values.begin(), x.values.end(), // x.values.begin(), cusp::multiplies_value<float>(-1)); //std::cout << "X:" << std::endl; //cusp::print(x); //Teste /* x(0, 0) = -0.486383; x(0, 1) = 0.19879; x(0, 2) = -0.161157; x(0, 3) = -0.104751; x(0, 4) = -0.30292; x(0, 5) = 0.617952; x(0, 6) = 0.00336875; x(0, 7) = 0.12139; x(1, 0) = -0.257823; x(1, 1) = -0.470035; x(1, 2) = 0.00482367; x(1, 3) = -0.0783436; x(1, 4) = -0.0339571; x(1, 5) = 0.441138; x(1, 6) = -0.0552392; x(1, 7) = -0.0423734; x(2, 0) = 0.117446; x(2, 1) = -0.515145; x(2, 2) = -0.103892; x(2, 3) = -0.76635; x(2, 4) = 0.100349; x(2, 5) = -0.0886266; x(2, 6) = 0.0471155; x(2, 7) = 0.0603901; x(3, 0) = 0.370876; x(3, 1) = -0.299143; x(3, 2) = 0.210852; x(3, 3) = 0.262026; x(3, 4) = -0.0698713; x(3, 5) = 0.367443; x(3, 6) = 0.263212; x(3, 7) = -0.608804; x(4, 0) = -0.0843047; x(4, 1) = 0.423009; x(4, 2) = -0.175306; x(4, 3) = -0.328237; x(4, 4) = 0.212571; x(4, 5) = 0.0279127; x(4, 6) = 0.70117; x(4, 7) = -0.279298; x(5, 0) = 0.0904064; x(5, 1) = 0.380869; x(5, 2) = 0.623452; x(5, 3) = -0.452362; x(5, 4) = -0.170505; x(5, 5) = 0.0908042; x(5, 6) = -0.37022; x(5, 7) = -0.253274; x(6, 0) = -0.490776; x(6, 1) = -0.0552435; x(6, 2) = 0.158411; x(6, 3) = 0.0819397; x(6, 4) = 0.744001; x(6, 5) = -0.0265378; x(6, 6) = -0.222269; x(6, 7) = -0.291328; x(7, 0) = 0.457635; x(7, 1) = 0.147379; x(7, 2) = 0.120787; x(7, 3) = 0.000417037; x(7, 4) = 0.506722; x(7, 5) = 0.500389; x(7, 6) = 0.0182501; x(7, 7) = 0.496706; x(8, 0) = 0.282924; x(8, 1) = 0.189519; x(8, 2) = -0.677971; x(8, 3) = -0.0886237; x(8, 4) = 0.0870132; x(8, 5) = 0.143339; x(8, 6) = -0.496989; x(8, 7) = -0.368016; */ //std::cout << "X:" << std::endl; //cusp::print(x); //********************************************* Array2d x_sqr(x.num_rows, x.num_cols); thrust::transform(x.values.begin(), x.values.end(), x_sqr.values.begin(), cusp::square_functor<type>()); //std::cout << "x_sqr:" << std::endl; //cusp::print(x_sqr); int ft = intNrLinhas * intNrColunas * (intNrColunas - 1); //std::cout << "ft value = " << ft << std::endl; //********************************************* Array2d T = x_sqr; thrust::transform(T.values.begin(), T.values.end(), T.values.begin(), cusp::multiplies_value<type>(intNrLinhas * (intNrColunas - 1))); //std::cout << "T:" << std::endl; //cusp::print(T); //********************************************* cusp::array1d<int,cusp::device_memory> index_sum_t(intNrColunas); cusp::array1d<type,cusp::device_memory> marginal_sum_t(intNrColunas-1); // ? 
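// Below, the column sums of T are obtained by transposing it and summing each
// row of the transpose with thrust::reduce_by_key, keyed by a row index
// generated by thrust::tabulate with linear_index_to_row_index (defined
// earlier in this file; as in the standard Thrust "sum the rows of a dense
// matrix" example it is assumed to map a linear index i to i / rowLength).
// A minimal standalone sketch of that idiom (illustrative only, names are
// hypothetical):
//
//   // A: row-major nRows x nCols, flattened into a device_vector<float>
//   thrust::device_vector<int> keys(nRows * nCols);
//   thrust::tabulate(keys.begin(), keys.end(),
//                    linear_index_to_row_index<int>(nCols)); // i -> i / nCols
//   thrust::device_vector<int> outKeys(nRows);
//   thrust::device_vector<float> rowSums(nRows);
//   thrust::reduce_by_key(keys.begin(), keys.end(), A.begin(),
//                         outKeys.begin(), rowSums.begin(),
//                         thrust::equal_to<int>(), thrust::plus<float>());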
cusp::array2d<type,cusp::device_memory> T_T(intNrColunas,intNrLinhas);
cusp::transpose(T, T_T);
//std::cout << "T transposed:" << std::endl;
//cusp::print(T_T);
Array1d index_similar(intNrColunas*(intNrColunas - 1));
thrust::tabulate(thrust::device, index_similar.begin(), index_similar.end(), linear_index_to_row_index<int>(intNrColunas));
//cusp::print(index_similar);
thrust::reduce_by_key(index_similar.begin(), index_similar.end(), T_T.values.begin(), index_sum_t.begin(), marginal_sum_t.begin(), thrust::equal_to<int>(), thrust::plus<type>());
//std::cout << "cc:" << std::endl;
//cusp::print(marginal_sum_t);
cusp::array1d<type,cusp::device_memory> cc(intNrColunas-1);
thrust::transform(marginal_sum_t.begin(), marginal_sum_t.end(), cc.begin(), reciprocal_my<type>(type(ft)));
//std::cout << "cc:" << std::endl;
//cusp::print(cc);
//*********************************************
Array1d index_X(intNrColunas*(intNrColunas - 1));
thrust::sequence(index_X.begin(), index_X.end(), 0);
cusp::array2d<type,cusp::device_memory> x_normed(x.num_rows,x.num_cols);
thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(index_X.begin(), x.values.begin())), thrust::make_zip_iterator(thrust::make_tuple(index_X.end(), x.values.end())), x_normed.values.begin(), column_by_vector<type>(thrust::raw_pointer_cast(cc.data()),(type)x.num_cols));
//std::cout << "x_normed:" << std::endl;
//cusp::print(x_normed);
//**************************************
cusp::array2d<type,cusp::device_memory> x_project(x.num_rows,x.num_cols);
thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(index_X.begin(), x_normed.values.begin())), thrust::make_zip_iterator(thrust::make_tuple(index_X.end(), x_normed.values.end())), x_project.values.begin(), column_by_vector<type>(thrust::raw_pointer_cast(rho.data()),(type)x.num_cols));
//std::cout << "x_project:" << std::endl;
//cusp::print(x_project);
//*************************** GPU to main memory **********************
cusp::array2d<type,cusp::host_memory> out(x_project);
auto finish = std::chrono::high_resolution_clock::now();
std::chrono::duration<double, std::ratio<1>> elapsed_seconds = finish - start;
auto time = elapsed_seconds.count();
//std::cout << "x_project:" << std::endl;
//cusp::print(x_project);
std::ofstream myfile;
//cout << strPrefixFile << endl;
myfile.open("./output/" + strPrefixFile + "_dominance_tempoProcessamentoGPU.txt");
myfile << "File name: " << strPrefixFile << std::endl;
myfile << "Matrix shape: [" << E.num_rows << "," << E.num_cols << "]" << std::endl;
myfile << "GPU compute time (seconds): " << time << std::endl;
myfile << "Xproject matrix: [" << x_project.num_rows << "," << x_project.num_cols << "]" << std::endl;
myfile.close();
std::cout << "Finished successfully!" << std::endl;
//*/
return 0;
}
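// Usage (from the argument handling and I/O above): ./main <database file>,
// where the file holds one row of space-separated integers per line; a timing
// report is written to ./output/<prefix>_dominance_tempoProcessamentoGPU.txt.
//
// For reference, the Gram matrix C = E^T * E that is built above with an
// explicit cusp::transpose plus a column-major cublasSgemm on swapped operands
// can also be formed directly from the row-major E by letting cuBLAS apply the
// transpose. A minimal sketch, assuming `type` is float, devE/devC are device
// pointers and the handle already exists (illustrative only, not the code path
// used above; the function name is hypothetical):
static void gramRowMajorSketch(cublasHandle_t handle, const float* devE,
                               float* devC, int nRows, int nCols) {
    // Viewed column-major, the row-major E (nRows x nCols) is M = E^T
    // (nCols x nRows, ld = nCols); M * M^T = E^T * E is a symmetric
    // nCols x nCols result, so its storage order does not matter.
    const float alpha = 1.0f, beta = 0.0f;
    cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T,
                nCols, nCols, nRows,
                &alpha, devE, nCols,
                devE, nCols,
                &beta, devC, nCols);
}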
e45246684cebdb926777c8e7b4ec2cf4bd132148.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Distributed under MIT licence. See https://github.com/QuEST-Kit/QuEST/blob/master/LICENCE.txt for details /** @file * An implementation of the backend in ../QuEST_internal.h for a GPU environment. * * @author Ania Brown * @author Tyson Jones */ # include "QuEST.h" # include "QuEST_precision.h" # include "QuEST_validation.h" # include "QuEST_internal.h" // purely to resolve getQuESTDefaultSeedKey # include "mt19937ar.h" # include <stdlib.h> # include <stdio.h> # include <math.h> #ifdef USE_HIP // Translate CUDA calls into HIP calls #include "cuda_to_hip.h" #endif # define REDUCE_SHARED_SIZE 512 # define DEBUG 0 /* * struct types for concisely passing unitaries to kernels */ // hide these from doxygen /// \cond HIDDEN_SYMBOLS typedef struct ArgMatrix2 { Complex r0c0, r0c1; Complex r1c0, r1c1; } ArgMatrix2; typedef struct ArgMatrix4 { Complex r0c0, r0c1, r0c2, r0c3; Complex r1c0, r1c1, r1c2, r1c3; Complex r2c0, r2c1, r2c2, r2c3; Complex r3c0, r3c1, r3c2, r3c3; } ArgMatrix4; ArgMatrix2 argifyMatrix2(ComplexMatrix2 m) { ArgMatrix2 a; a.r0c0.real=m.real[0][0]; a.r0c0.imag=m.imag[0][0]; a.r0c1.real=m.real[0][1]; a.r0c1.imag=m.imag[0][1]; a.r1c0.real=m.real[1][0]; a.r1c0.imag=m.imag[1][0]; a.r1c1.real=m.real[1][1]; a.r1c1.imag=m.imag[1][1]; return a; } ArgMatrix4 argifyMatrix4(ComplexMatrix4 m) { ArgMatrix4 a; a.r0c0.real=m.real[0][0]; a.r0c0.imag=m.imag[0][0]; a.r0c1.real=m.real[0][1]; a.r0c1.imag=m.imag[0][1]; a.r0c2.real=m.real[0][2]; a.r0c2.imag=m.imag[0][2]; a.r0c3.real=m.real[0][3]; a.r0c3.imag=m.imag[0][3]; a.r1c0.real=m.real[1][0]; a.r1c0.imag=m.imag[1][0]; a.r1c1.real=m.real[1][1]; a.r1c1.imag=m.imag[1][1]; a.r1c2.real=m.real[1][2]; a.r1c2.imag=m.imag[1][2]; a.r1c3.real=m.real[1][3]; a.r1c3.imag=m.imag[1][3]; a.r2c0.real=m.real[2][0]; a.r2c0.imag=m.imag[2][0]; a.r2c1.real=m.real[2][1]; a.r2c1.imag=m.imag[2][1]; a.r2c2.real=m.real[2][2]; a.r2c2.imag=m.imag[2][2]; a.r2c3.real=m.real[2][3]; a.r2c3.imag=m.imag[2][3]; a.r3c0.real=m.real[3][0]; a.r3c0.imag=m.imag[3][0]; a.r3c1.real=m.real[3][1]; a.r3c1.imag=m.imag[3][1]; a.r3c2.real=m.real[3][2]; a.r3c2.imag=m.imag[3][2]; a.r3c3.real=m.real[3][3]; a.r3c3.imag=m.imag[3][3]; return a; } /// \endcond /* * in-kernel bit twiddling functions */ __forceinline__ __device__ int extractBit (const int locationOfBitFromRight, const long long int theEncodedNumber) { return (theEncodedNumber & ( 1LL << locationOfBitFromRight )) >> locationOfBitFromRight; } __forceinline__ __device__ int getBitMaskParity(long long int mask) { int parity = 0; while (mask) { parity = !parity; mask = mask & (mask-1); } return parity; } __forceinline__ __device__ long long int flipBit(const long long int number, const int bitInd) { return (number ^ (1LL << bitInd)); } __forceinline__ __device__ long long int insertZeroBit(const long long int number, const int index) { long long int left, right; left = (number >> index) << index; right = number - left; return (left << 1) ^ right; } __forceinline__ __device__ long long int insertTwoZeroBits(const long long int number, const int bit1, const int bit2) { int small = (bit1 < bit2)? bit1 : bit2; int big = (bit1 < bit2)? bit2 : bit1; return insertZeroBit(insertZeroBit(number, small), big); } __forceinline__ __device__ long long int insertZeroBits(long long int number, int* inds, const int numInds) { /* inserted bit inds must strictly increase, so that their final indices are correct. 
* in-lieu of sorting (avoided since no C++ variable-size arrays, and since we're already * memory bottle-necked so overhead eats this slowdown), we find the next-smallest index each * at each insert. recall every element of inds (a positive or zero number) is unique. * This function won't appear in the CPU code, which can use C99 variable-size arrays and * ought to make a sorted array before threading */ int curMin = inds[0]; int prevMin = -1; for (int n=0; n < numInds; n++) { // find next min for (int t=0; t < numInds; t++) if (inds[t]>prevMin && inds[t]<curMin) curMin = inds[t]; number = insertZeroBit(number, curMin); // set curMin to an arbitrary non-visited elem prevMin = curMin; for (int t=0; t < numInds; t++) if (inds[t] > curMin) { curMin = inds[t]; break; } } return number; } /* * state vector and density matrix operations */ #ifdef __cplusplus extern "C" { #endif void statevec_setAmps(Qureg qureg, long long int startInd, qreal* reals, qreal* imags, long long int numAmps) { hipDeviceSynchronize(); hipMemcpy( qureg.deviceStateVec.real + startInd, reals, numAmps * sizeof(*(qureg.deviceStateVec.real)), hipMemcpyHostToDevice); hipMemcpy( qureg.deviceStateVec.imag + startInd, imags, numAmps * sizeof(*(qureg.deviceStateVec.imag)), hipMemcpyHostToDevice); } /** works for both statevectors and density matrices */ void statevec_cloneQureg(Qureg targetQureg, Qureg copyQureg) { // copy copyQureg's GPU statevec to targetQureg's GPU statevec hipDeviceSynchronize(); hipMemcpy( targetQureg.deviceStateVec.real, copyQureg.deviceStateVec.real, targetQureg.numAmpsPerChunk*sizeof(*(targetQureg.deviceStateVec.real)), hipMemcpyDeviceToDevice); hipMemcpy( targetQureg.deviceStateVec.imag, copyQureg.deviceStateVec.imag, targetQureg.numAmpsPerChunk*sizeof(*(targetQureg.deviceStateVec.imag)), hipMemcpyDeviceToDevice); } __global__ void densmatr_initPureStateKernel( long long int numPureAmps, qreal *targetVecReal, qreal *targetVecImag, qreal *copyVecReal, qreal *copyVecImag) { // this is a particular index of the pure copyQureg long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=numPureAmps) return; qreal realRow = copyVecReal[index]; qreal imagRow = copyVecImag[index]; for (long long int col=0; col < numPureAmps; col++) { qreal realCol = copyVecReal[col]; qreal imagCol = - copyVecImag[col]; // minus for conjugation targetVecReal[col*numPureAmps + index] = realRow*realCol - imagRow*imagCol; targetVecImag[col*numPureAmps + index] = realRow*imagCol + imagRow*realCol; } } void densmatr_initPureState(Qureg targetQureg, Qureg copyQureg) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(copyQureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( densmatr_initPureStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, copyQureg.numAmpsPerChunk, targetQureg.deviceStateVec.real, targetQureg.deviceStateVec.imag, copyQureg.deviceStateVec.real, copyQureg.deviceStateVec.imag); } __global__ void densmatr_initPlusStateKernel(long long int stateVecSize, qreal probFactor, qreal *stateVecReal, qreal *stateVecImag){ long long int index; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; stateVecReal[index] = probFactor; stateVecImag[index] = 0.0; } void densmatr_initPlusState(Qureg qureg) { qreal probFactor = 1.0/((qreal) (1LL << qureg.numQubitsRepresented)); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( 
densmatr_initPlusStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg.numAmpsPerChunk, probFactor, qureg.deviceStateVec.real, qureg.deviceStateVec.imag); } __global__ void densmatr_initClassicalStateKernel( long long int densityNumElems, qreal *densityReal, qreal *densityImag, long long int densityInd) { // initialise the state to all zeros long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index >= densityNumElems) return; densityReal[index] = 0.0; densityImag[index] = 0.0; if (index==densityInd){ // classical state has probability 1 densityReal[densityInd] = 1.0; densityImag[densityInd] = 0.0; } } void densmatr_initClassicalState(Qureg qureg, long long int stateInd) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); // index of the desired state in the flat density matrix long long int densityDim = 1LL << qureg.numQubitsRepresented; long long int densityInd = (densityDim + 1)*stateInd; // identical to pure version hipLaunchKernelGGL(( densmatr_initClassicalStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg.numAmpsPerChunk, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, densityInd); } void statevec_createQureg(Qureg *qureg, int numQubits, QuESTEnv env) { // allocate CPU memory long long int numAmps = 1L << numQubits; long long int numAmpsPerRank = numAmps/env.numRanks; qureg->stateVec.real = (qreal*) malloc(numAmpsPerRank * sizeof(qureg->stateVec.real)); qureg->stateVec.imag = (qreal*) malloc(numAmpsPerRank * sizeof(qureg->stateVec.imag)); if (env.numRanks>1){ qureg->pairStateVec.real = (qreal*) malloc(numAmpsPerRank * sizeof(qureg->pairStateVec.real)); qureg->pairStateVec.imag = (qreal*) malloc(numAmpsPerRank * sizeof(qureg->pairStateVec.imag)); } qureg->numQubitsInStateVec = numQubits; qureg->numAmpsPerChunk = numAmpsPerRank; qureg->numAmpsTotal = numAmps; qureg->chunkId = env.rank; qureg->numChunks = env.numRanks; qureg->isDensityMatrix = 0; // check cpu memory allocation was successful validateQuregAllocation(qureg, env, __func__); // allocate GPU memory hipMalloc(&(qureg->deviceStateVec.real), qureg->numAmpsPerChunk*sizeof(*(qureg->deviceStateVec.real))); hipMalloc(&(qureg->deviceStateVec.imag), qureg->numAmpsPerChunk*sizeof(*(qureg->deviceStateVec.imag))); hipMalloc(&(qureg->firstLevelReduction), ceil(qureg->numAmpsPerChunk/(qreal)REDUCE_SHARED_SIZE)*sizeof(qreal)); hipMalloc(&(qureg->secondLevelReduction), ceil(qureg->numAmpsPerChunk/(qreal)(REDUCE_SHARED_SIZE*REDUCE_SHARED_SIZE))* sizeof(qreal)); // check gpu memory allocation was successful validateQuregGPUAllocation(qureg, env, __func__); } void statevec_destroyQureg(Qureg qureg, QuESTEnv env) { // Free CPU memory free(qureg.stateVec.real); free(qureg.stateVec.imag); if (env.numRanks>1){ free(qureg.pairStateVec.real); free(qureg.pairStateVec.imag); } // Free GPU memory hipFree(qureg.deviceStateVec.real); hipFree(qureg.deviceStateVec.imag); hipFree(qureg.firstLevelReduction); hipFree(qureg.secondLevelReduction); } DiagonalOp agnostic_createDiagonalOp(int numQubits, QuESTEnv env) { DiagonalOp op; op.numQubits = numQubits; op.numElemsPerChunk = (1LL << numQubits) / env.numRanks; op.chunkId = env.rank; op.numChunks = env.numRanks; // allocate CPU memory (initialised to zero) op.real = (qreal*) calloc(op.numElemsPerChunk, sizeof(qreal)); op.imag = (qreal*) calloc(op.numElemsPerChunk, sizeof(qreal)); // @TODO no handling of rank>1 allocation (no distributed GPU) // check cpu memory allocation 
was successful validateDiagonalOpAllocation(&op, env, __func__); // allocate GPU memory size_t arrSize = op.numElemsPerChunk * sizeof(qreal); hipMalloc(&(op.deviceOperator.real), arrSize); hipMalloc(&(op.deviceOperator.imag), arrSize); // check gpu memory allocation was successful validateDiagonalOpGPUAllocation(&op, env, __func__); // initialise GPU memory to zero hipMemset(op.deviceOperator.real, 0, arrSize); hipMemset(op.deviceOperator.imag, 0, arrSize); return op; } void agnostic_destroyDiagonalOp(DiagonalOp op) { free(op.real); free(op.imag); hipFree(op.deviceOperator.real); hipFree(op.deviceOperator.imag); } void agnostic_syncDiagonalOp(DiagonalOp op) { hipDeviceSynchronize(); size_t mem_elems = op.numElemsPerChunk * sizeof *op.real; hipMemcpy(op.deviceOperator.real, op.real, mem_elems, hipMemcpyHostToDevice); hipMemcpy(op.deviceOperator.imag, op.imag, mem_elems, hipMemcpyHostToDevice); } __global__ void agnostic_initDiagonalOpFromPauliHamilKernel( DiagonalOp op, enum pauliOpType* pauliCodes, qreal* termCoeffs, int numSumTerms ) { // each thread processes one diagonal element long long int elemInd = blockIdx.x*blockDim.x + threadIdx.x; if (elemInd >= op.numElemsPerChunk) return; qreal elem = 0; // elem is (+-) every coefficient, with sign determined by parity for (int t=0; t<numSumTerms; t++) { // determine the parity of the Z-targeted qubits in the element's corresponding state int isOddNumOnes = 0; for (int q=0; q<op.numQubits; q++) if (pauliCodes[q + t*op.numQubits] == PAULI_Z) if (extractBit(q, elemInd)) isOddNumOnes = !isOddNumOnes; // avoid warp divergence int sign = 1 - 2*isOddNumOnes; // (-1 if isOddNumOnes, else +1) elem += termCoeffs[t] * sign; } op.deviceOperator.real[elemInd] = elem; op.deviceOperator.imag[elemInd] = 0; } void agnostic_initDiagonalOpFromPauliHamil(DiagonalOp op, PauliHamil hamil) { // copy args intop GPU memory enum pauliOpType* d_pauliCodes; size_t mem_pauliCodes = hamil.numSumTerms * op.numQubits * sizeof *d_pauliCodes; hipMalloc(&d_pauliCodes, mem_pauliCodes); hipMemcpy(d_pauliCodes, hamil.pauliCodes, mem_pauliCodes, hipMemcpyHostToDevice); qreal* d_termCoeffs; size_t mem_termCoeffs = hamil.numSumTerms * sizeof *d_termCoeffs; hipMalloc(&d_termCoeffs, mem_termCoeffs); hipMemcpy(d_termCoeffs, hamil.termCoeffs, mem_termCoeffs, hipMemcpyHostToDevice); int numThreadsPerBlock = 128; int numBlocks = ceil(op.numElemsPerChunk / (qreal) numThreadsPerBlock); hipLaunchKernelGGL(( agnostic_initDiagonalOpFromPauliHamilKernel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, op, d_pauliCodes, d_termCoeffs, hamil.numSumTerms); // copy populated operator into to RAM hipDeviceSynchronize(); size_t mem_elems = op.numElemsPerChunk * sizeof *op.real; hipMemcpy(op.real, op.deviceOperator.real, mem_elems, hipMemcpyDeviceToHost); hipMemcpy(op.imag, op.deviceOperator.imag, mem_elems, hipMemcpyDeviceToHost); hipFree(d_pauliCodes); hipFree(d_termCoeffs); } int GPUExists(void){ int deviceCount, device; int gpuDeviceCount = 0; struct hipDeviceProp_t properties; hipError_t cudaResultCode = hipGetDeviceCount(&deviceCount); if (cudaResultCode != hipSuccess) deviceCount = 0; /* machines with no GPUs can still report one emulation device */ for (device = 0; device < deviceCount; ++device) { hipGetDeviceProperties(&properties, device); if (properties.major != 9999) { /* 9999 means emulation only */ ++gpuDeviceCount; } } if (gpuDeviceCount) return 1; else return 0; } QuESTEnv createQuESTEnv(void) { validateGPUExists(GPUExists(), __func__); QuESTEnv env; env.rank=0; env.numRanks=1; 
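    // the GPU backend always runs as a single chunk (rank 0 of 1); there is no
    // distributed-GPU support here (see the rank>1 @TODO above, and the
    // "MPI=0 ... ranks=1" string reported by getEnvironmentString below)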
env.seeds = NULL; env.numSeeds = 0; seedQuESTDefault(&env); return env; } void syncQuESTEnv(QuESTEnv env){ hipDeviceSynchronize(); } int syncQuESTSuccess(int successCode){ return successCode; } void destroyQuESTEnv(QuESTEnv env){ free(env.seeds); } void reportQuESTEnv(QuESTEnv env){ printf("EXECUTION ENVIRONMENT:\n"); printf("Running locally on one node with GPU\n"); printf("Number of ranks is %d\n", env.numRanks); # ifdef _OPENMP printf("OpenMP enabled\n"); printf("Number of threads available is %d\n", omp_get_max_threads()); # else printf("OpenMP disabled\n"); # endif } void getEnvironmentString(QuESTEnv env, char str[200]){ // OpenMP can be hybridised with GPU in future, so this check is safe and worthwhile int ompStatus=0; int numThreads=1; # ifdef _OPENMP ompStatus=1; numThreads=omp_get_max_threads(); # endif // there is no reporting of CUDA cores/threads/blocks currently (since non-trivial) sprintf(str, "CUDA=1 OpenMP=%d MPI=0 threads=%d ranks=1", ompStatus, numThreads); } void copyStateToGPU(Qureg qureg) { if (DEBUG) printf("Copying data to GPU\n"); hipMemcpy(qureg.deviceStateVec.real, qureg.stateVec.real, qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.real)), hipMemcpyHostToDevice); hipMemcpy(qureg.deviceStateVec.imag, qureg.stateVec.imag, qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.imag)), hipMemcpyHostToDevice); if (DEBUG) printf("Finished copying data to GPU\n"); } void copyStateFromGPU(Qureg qureg) { hipDeviceSynchronize(); if (DEBUG) printf("Copying data from GPU\n"); hipMemcpy(qureg.stateVec.real, qureg.deviceStateVec.real, qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.real)), hipMemcpyDeviceToHost); hipMemcpy(qureg.stateVec.imag, qureg.deviceStateVec.imag, qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.imag)), hipMemcpyDeviceToHost); if (DEBUG) printf("Finished copying data from GPU\n"); } void statevec_copySubstateToGPU(Qureg qureg, long long int startInd, long long int numAmps) { if (DEBUG) printf("Copying data to GPU\n"); hipMemcpy(&(qureg.deviceStateVec.real[startInd]), &(qureg.stateVec.real[startInd]), numAmps*sizeof(*(qureg.deviceStateVec.real)), hipMemcpyHostToDevice); hipMemcpy(&(qureg.deviceStateVec.imag[startInd]), &(qureg.stateVec.imag[startInd]), numAmps*sizeof(*(qureg.deviceStateVec.imag)), hipMemcpyHostToDevice); if (DEBUG) printf("Finished copying data to GPU\n"); } void statevec_copySubstateFromGPU(Qureg qureg, long long int startInd, long long int numAmps) { hipDeviceSynchronize(); if (DEBUG) printf("Copying data from GPU\n"); hipMemcpy(&(qureg.stateVec.real[startInd]), &(qureg.deviceStateVec.real[startInd]), numAmps*sizeof(*(qureg.deviceStateVec.real)), hipMemcpyDeviceToHost); hipMemcpy(&(qureg.stateVec.imag[startInd]), &(qureg.deviceStateVec.imag[startInd]), numAmps*sizeof(*(qureg.deviceStateVec.imag)), hipMemcpyDeviceToHost); if (DEBUG) printf("Finished copying data from GPU\n"); } /** Print the current state vector of probability amplitudes for a set of qubits to standard out. For debugging purposes. Each rank should print output serially. 
Only print output for systems <= 5 qubits */ void statevec_reportStateToScreen(Qureg qureg, QuESTEnv env, int reportRank){ long long int index; int rank; copyStateFromGPU(qureg); if (qureg.numQubitsInStateVec<=5){ for (rank=0; rank<qureg.numChunks; rank++){ if (qureg.chunkId==rank){ if (reportRank) { printf("Reporting state from rank %d [\n", qureg.chunkId); //printf("\trank, index, real, imag\n"); printf("real, imag\n"); } else if (rank==0) { printf("Reporting state [\n"); printf("real, imag\n"); } for(index=0; index<qureg.numAmpsPerChunk; index++){ printf(REAL_STRING_FORMAT ", " REAL_STRING_FORMAT "\n", qureg.stateVec.real[index], qureg.stateVec.imag[index]); } if (reportRank || rank==qureg.numChunks-1) printf("]\n"); } syncQuESTEnv(env); } } } qreal statevec_getRealAmp(Qureg qureg, long long int index){ qreal el=0; hipMemcpy(&el, &(qureg.deviceStateVec.real[index]), sizeof(*(qureg.deviceStateVec.real)), hipMemcpyDeviceToHost); return el; } qreal statevec_getImagAmp(Qureg qureg, long long int index){ qreal el=0; hipMemcpy(&el, &(qureg.deviceStateVec.imag[index]), sizeof(*(qureg.deviceStateVec.imag)), hipMemcpyDeviceToHost); return el; } __global__ void statevec_initBlankStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){ long long int index; // initialise the statevector to be all-zeros index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; stateVecReal[index] = 0.0; stateVecImag[index] = 0.0; } void statevec_initBlankState(Qureg qureg) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_initBlankStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg.numAmpsPerChunk, qureg.deviceStateVec.real, qureg.deviceStateVec.imag); } __global__ void statevec_initZeroStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){ long long int index; // initialise the state to |0000..0000> index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; stateVecReal[index] = 0.0; stateVecImag[index] = 0.0; if (index==0){ // zero state |0000..0000> has probability 1 stateVecReal[0] = 1.0; stateVecImag[0] = 0.0; } } void statevec_initZeroState(Qureg qureg) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_initZeroStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg.numAmpsPerChunk, qureg.deviceStateVec.real, qureg.deviceStateVec.imag); } __global__ void statevec_initPlusStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){ long long int index; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; qreal normFactor = 1.0/sqrt((qreal)stateVecSize); stateVecReal[index] = normFactor; stateVecImag[index] = 0.0; } void statevec_initPlusState(Qureg qureg) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_initPlusStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg.numAmpsPerChunk, qureg.deviceStateVec.real, qureg.deviceStateVec.imag); } __global__ void statevec_initClassicalStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag, long long int stateInd){ long long int index; // initialise the state to |stateInd> index = blockIdx.x*blockDim.x + 
threadIdx.x; if (index>=stateVecSize) return; stateVecReal[index] = 0.0; stateVecImag[index] = 0.0; if (index==stateInd){ // classical state has probability 1 stateVecReal[stateInd] = 1.0; stateVecImag[stateInd] = 0.0; } } void statevec_initClassicalState(Qureg qureg, long long int stateInd) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_initClassicalStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg.numAmpsPerChunk, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, stateInd); } __global__ void statevec_initDebugStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){ long long int index; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; stateVecReal[index] = (index*2.0)/10.0; stateVecImag[index] = (index*2.0+1.0)/10.0; } void statevec_initDebugState(Qureg qureg) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_initDebugStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg.numAmpsPerChunk, qureg.deviceStateVec.real, qureg.deviceStateVec.imag); } __global__ void statevec_initStateOfSingleQubitKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag, int qubitId, int outcome){ long long int index; int bit; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; qreal normFactor = 1.0/sqrt((qreal)stateVecSize/2); bit = extractBit(qubitId, index); if (bit==outcome) { stateVecReal[index] = normFactor; stateVecImag[index] = 0.0; } else { stateVecReal[index] = 0.0; stateVecImag[index] = 0.0; } } void statevec_initStateOfSingleQubit(Qureg *qureg, int qubitId, int outcome) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg->numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_initStateOfSingleQubitKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg->numAmpsPerChunk, qureg->deviceStateVec.real, qureg->deviceStateVec.imag, qubitId, outcome); } // returns 1 if successful, else 0 int statevec_initStateFromSingleFile(Qureg *qureg, char filename[200], QuESTEnv env){ long long int chunkSize, stateVecSize; long long int indexInChunk, totalIndex; chunkSize = qureg->numAmpsPerChunk; stateVecSize = chunkSize*qureg->numChunks; qreal *stateVecReal = qureg->stateVec.real; qreal *stateVecImag = qureg->stateVec.imag; FILE *fp; char line[200]; fp = fopen(filename, "r"); if (fp == NULL) return 0; indexInChunk = 0; totalIndex = 0; while (fgets(line, sizeof(char)*200, fp) != NULL && totalIndex<stateVecSize){ if (line[0]!='#'){ int chunkId = totalIndex/chunkSize; if (chunkId==qureg->chunkId){ sscanf(line, REAL_SPECIFIER ", " REAL_SPECIFIER, &(stateVecReal[indexInChunk]), &(stateVecImag[indexInChunk])); indexInChunk += 1; } totalIndex += 1; } } fclose(fp); copyStateToGPU(*qureg); // indicate success return 1; } int statevec_compareStates(Qureg mq1, Qureg mq2, qreal precision){ qreal diff; int chunkSize = mq1.numAmpsPerChunk; copyStateFromGPU(mq1); copyStateFromGPU(mq2); for (int i=0; i<chunkSize; i++){ diff = mq1.stateVec.real[i] - mq2.stateVec.real[i]; if (diff<0) diff *= -1; if (diff>precision) return 0; diff = mq1.stateVec.imag[i] - mq2.stateVec.imag[i]; if (diff<0) diff *= -1; if (diff>precision) return 0; } return 1; } __global__ void statevec_compactUnitaryKernel (Qureg 
qureg, int rotQubit, Complex alpha, Complex beta){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp,stateRealLo, // storage for previous state values stateImagUp,stateImagLo; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity long long int numTasks=qureg.numAmpsPerChunk>>1; sizeHalfBlock = 1LL << rotQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! fix -- no necessary for GPU version qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; qreal alphaImag=alpha.imag, alphaReal=alpha.real; qreal betaImag=beta.imag, betaReal=beta.real; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; // state[indexUp] = alpha * state[indexUp] - conj(beta) * state[indexLo] stateVecReal[indexUp] = alphaReal*stateRealUp - alphaImag*stateImagUp - betaReal*stateRealLo - betaImag*stateImagLo; stateVecImag[indexUp] = alphaReal*stateImagUp + alphaImag*stateRealUp - betaReal*stateImagLo + betaImag*stateRealLo; // state[indexLo] = beta * state[indexUp] + conj(alpha) * state[indexLo] stateVecReal[indexLo] = betaReal*stateRealUp - betaImag*stateImagUp + alphaReal*stateRealLo + alphaImag*stateImagLo; stateVecImag[indexLo] = betaReal*stateImagUp + betaImag*stateRealUp + alphaReal*stateImagLo - alphaImag*stateRealLo; } void statevec_compactUnitary(Qureg qureg, int targetQubit, Complex alpha, Complex beta) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_compactUnitaryKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit, alpha, beta); } __global__ void statevec_controlledCompactUnitaryKernel (Qureg qureg, int controlQubit, int targetQubit, Complex alpha, Complex beta){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp,stateRealLo, // storage for previous state values stateImagUp,stateImagLo; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity long long int numTasks=qureg.numAmpsPerChunk>>1; int controlBit; sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! 
fix -- no necessary for GPU version qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; qreal alphaImag=alpha.imag, alphaReal=alpha.real; qreal betaImag=beta.imag, betaReal=beta.real; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; controlBit = extractBit(controlQubit, indexUp); if (controlBit){ // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; // state[indexUp] = alpha * state[indexUp] - conj(beta) * state[indexLo] stateVecReal[indexUp] = alphaReal*stateRealUp - alphaImag*stateImagUp - betaReal*stateRealLo - betaImag*stateImagLo; stateVecImag[indexUp] = alphaReal*stateImagUp + alphaImag*stateRealUp - betaReal*stateImagLo + betaImag*stateRealLo; // state[indexLo] = beta * state[indexUp] + conj(alpha) * state[indexLo] stateVecReal[indexLo] = betaReal*stateRealUp - betaImag*stateImagUp + alphaReal*stateRealLo + alphaImag*stateImagLo; stateVecImag[indexLo] = betaReal*stateImagUp + betaImag*stateRealUp + alphaReal*stateImagLo - alphaImag*stateRealLo; } } void statevec_controlledCompactUnitary(Qureg qureg, int controlQubit, int targetQubit, Complex alpha, Complex beta) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_controlledCompactUnitaryKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, controlQubit, targetQubit, alpha, beta); } __global__ void statevec_unitaryKernel(Qureg qureg, int targetQubit, ArgMatrix2 u){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp,stateRealLo, // storage for previous state values stateImagUp,stateImagLo; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity long long int numTasks=qureg.numAmpsPerChunk>>1; sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! 
fix -- no necessary for GPU version qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; // state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo] stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp + u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo; stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp + u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo; // state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo] stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp + u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo; stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp + u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo; } void statevec_unitary(Qureg qureg, int targetQubit, ComplexMatrix2 u) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_unitaryKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit, argifyMatrix2(u)); } __global__ void statevec_multiControlledMultiQubitUnitaryKernel( Qureg qureg, long long int ctrlMask, int* targs, int numTargs, qreal* uRe, qreal* uIm, long long int* ampInds, qreal* reAmps, qreal* imAmps, long long int numTargAmps) { // decide the amplitudes this thread will modify long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x; long long int numTasks = qureg.numAmpsPerChunk >> numTargs; // kernel called on every 1 in 2^numTargs amplitudes if (thisTask>=numTasks) return; // find this task's start index (where all targs are 0) long long int ind00 = insertZeroBits(thisTask, targs, numTargs); // this task only modifies amplitudes if control qubits are 1 for this state if (ctrlMask && (ctrlMask&ind00) != ctrlMask) return; qreal *reVec = qureg.deviceStateVec.real; qreal *imVec = qureg.deviceStateVec.imag; /* each thread needs: long long int ampInds[numAmps]; qreal reAmps[numAmps]; qreal imAmps[numAmps]; but instead has access to shared arrays, with below stride and offset */ size_t stride = gridDim.x*blockDim.x; size_t offset = blockIdx.x*blockDim.x + threadIdx.x; // determine the indices and record values of target amps long long int ind; for (int i=0; i < numTargAmps; i++) { // get global index of current target qubit assignment ind = ind00; for (int t=0; t < numTargs; t++) if (extractBit(t, i)) ind = flipBit(ind, targs[t]); ampInds[i*stride+offset] = ind; reAmps [i*stride+offset] = reVec[ind]; imAmps [i*stride+offset] = imVec[ind]; } // update the amplitudes for (int r=0; r < numTargAmps; r++) { ind = ampInds[r*stride+offset]; reVec[ind] = 0; imVec[ind] = 0; for (int c=0; c < numTargAmps; c++) { qreal uReElem = uRe[c + r*numTargAmps]; qreal uImElem = uIm[c + r*numTargAmps]; reVec[ind] += reAmps[c*stride+offset]*uReElem - imAmps[c*stride+offset]*uImElem; imVec[ind] += reAmps[c*stride+offset]*uImElem + imAmps[c*stride+offset]*uReElem; } } } void statevec_multiControlledMultiQubitUnitary(Qureg qureg, long long int ctrlMask, int* targs, int numTargs, ComplexMatrixN u) { int 
threadsPerCUDABlock = 128; int CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>numTargs)/threadsPerCUDABlock); // allocate device space for global {targs} (length: numTargs) and populate int *d_targs; size_t targMemSize = numTargs * sizeof *d_targs; hipMalloc(&d_targs, targMemSize); hipMemcpy(d_targs, targs, targMemSize, hipMemcpyHostToDevice); // flatten out the u.real and u.imag lists int uNumRows = (1 << u.numQubits); qreal* uReFlat = (qreal*) malloc(uNumRows*uNumRows * sizeof *uReFlat); qreal* uImFlat = (qreal*) malloc(uNumRows*uNumRows * sizeof *uImFlat); long long int i = 0; for (int r=0; r < uNumRows; r++) for (int c=0; c < uNumRows; c++) { uReFlat[i] = u.real[r][c]; uImFlat[i] = u.imag[r][c]; i++; } // allocate device space for global u.real and u.imag (flatten by concatenating rows) and populate qreal* d_uRe; qreal* d_uIm; size_t uMemSize = uNumRows*uNumRows * sizeof *d_uRe; // size of each of d_uRe and d_uIm hipMalloc(&d_uRe, uMemSize); hipMalloc(&d_uIm, uMemSize); hipMemcpy(d_uRe, uReFlat, uMemSize, hipMemcpyHostToDevice); hipMemcpy(d_uIm, uImFlat, uMemSize, hipMemcpyHostToDevice); // allocate device Wspace for thread-local {ampInds}, {reAmps}, {imAmps} (length: 1<<numTargs) long long int *d_ampInds; qreal *d_reAmps; qreal *d_imAmps; size_t gridSize = (size_t) threadsPerCUDABlock * CUDABlocks; int numTargAmps = uNumRows; hipMalloc(&d_ampInds, numTargAmps*gridSize * sizeof *d_ampInds); hipMalloc(&d_reAmps, numTargAmps*gridSize * sizeof *d_reAmps); hipMalloc(&d_imAmps, numTargAmps*gridSize * sizeof *d_imAmps); // call kernel hipLaunchKernelGGL(( statevec_multiControlledMultiQubitUnitaryKernel), dim3(CUDABlocks),dim3(threadsPerCUDABlock), 0, 0, qureg, ctrlMask, d_targs, numTargs, d_uRe, d_uIm, d_ampInds, d_reAmps, d_imAmps, numTargAmps); // free kernel memory free(uReFlat); free(uImFlat); hipFree(d_targs); hipFree(d_uRe); hipFree(d_uIm); hipFree(d_ampInds); hipFree(d_reAmps); hipFree(d_imAmps); } __global__ void statevec_multiControlledTwoQubitUnitaryKernel(Qureg qureg, long long int ctrlMask, int q1, int q2, ArgMatrix4 u){ // decide the 4 amplitudes this thread will modify long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x; long long int numTasks = qureg.numAmpsPerChunk >> 2; // kernel called on every 1 in 4 amplitudes if (thisTask>=numTasks) return; qreal *reVec = qureg.deviceStateVec.real; qreal *imVec = qureg.deviceStateVec.imag; // find indices of amplitudes to modify (treat q1 as the least significant bit) long long int ind00, ind01, ind10, ind11; ind00 = insertTwoZeroBits(thisTask, q1, q2); // modify only if control qubits are 1 for this state if (ctrlMask && (ctrlMask&ind00) != ctrlMask) return; ind01 = flipBit(ind00, q1); ind10 = flipBit(ind00, q2); ind11 = flipBit(ind01, q2); // extract statevec amplitudes qreal re00, re01, re10, re11; qreal im00, im01, im10, im11; re00 = reVec[ind00]; im00 = imVec[ind00]; re01 = reVec[ind01]; im01 = imVec[ind01]; re10 = reVec[ind10]; im10 = imVec[ind10]; re11 = reVec[ind11]; im11 = imVec[ind11]; // apply u * {amp00, amp01, amp10, amp11} reVec[ind00] = u.r0c0.real*re00 - u.r0c0.imag*im00 + u.r0c1.real*re01 - u.r0c1.imag*im01 + u.r0c2.real*re10 - u.r0c2.imag*im10 + u.r0c3.real*re11 - u.r0c3.imag*im11; imVec[ind00] = u.r0c0.imag*re00 + u.r0c0.real*im00 + u.r0c1.imag*re01 + u.r0c1.real*im01 + u.r0c2.imag*re10 + u.r0c2.real*im10 + u.r0c3.imag*re11 + u.r0c3.real*im11; reVec[ind01] = u.r1c0.real*re00 - u.r1c0.imag*im00 + u.r1c1.real*re01 - u.r1c1.imag*im01 + u.r1c2.real*re10 - u.r1c2.imag*im10 + u.r1c3.real*re11 - u.r1c3.imag*im11; 
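    // each real/imag pair of assignments in this kernel expands one row of u
    // applied to the amplitude vector: a matrix element u_rc = a+ib acting on
    // an input amplitude c+id contributes (a*c - b*d) to the real part and
    // (a*d + b*c) to the imaginary part, summed over the four input amplitudes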
imVec[ind01] = u.r1c0.imag*re00 + u.r1c0.real*im00 + u.r1c1.imag*re01 + u.r1c1.real*im01 + u.r1c2.imag*re10 + u.r1c2.real*im10 + u.r1c3.imag*re11 + u.r1c3.real*im11; reVec[ind10] = u.r2c0.real*re00 - u.r2c0.imag*im00 + u.r2c1.real*re01 - u.r2c1.imag*im01 + u.r2c2.real*re10 - u.r2c2.imag*im10 + u.r2c3.real*re11 - u.r2c3.imag*im11; imVec[ind10] = u.r2c0.imag*re00 + u.r2c0.real*im00 + u.r2c1.imag*re01 + u.r2c1.real*im01 + u.r2c2.imag*re10 + u.r2c2.real*im10 + u.r2c3.imag*re11 + u.r2c3.real*im11; reVec[ind11] = u.r3c0.real*re00 - u.r3c0.imag*im00 + u.r3c1.real*re01 - u.r3c1.imag*im01 + u.r3c2.real*re10 - u.r3c2.imag*im10 + u.r3c3.real*re11 - u.r3c3.imag*im11; imVec[ind11] = u.r3c0.imag*re00 + u.r3c0.real*im00 + u.r3c1.imag*re01 + u.r3c1.real*im01 + u.r3c2.imag*re10 + u.r3c2.real*im10 + u.r3c3.imag*re11 + u.r3c3.real*im11; } void statevec_multiControlledTwoQubitUnitary(Qureg qureg, long long int ctrlMask, int q1, int q2, ComplexMatrix4 u) { int threadsPerCUDABlock = 128; int CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>2)/threadsPerCUDABlock); // one kernel eval for every 4 amplitudes hipLaunchKernelGGL(( statevec_multiControlledTwoQubitUnitaryKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, ctrlMask, q1, q2, argifyMatrix4(u)); } __global__ void statevec_controlledUnitaryKernel(Qureg qureg, int controlQubit, int targetQubit, ArgMatrix2 u){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp,stateRealLo, // storage for previous state values stateImagUp,stateImagLo; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity long long int numTasks=qureg.numAmpsPerChunk>>1; int controlBit; sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! 
fix -- no necessary for GPU version qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; controlBit = extractBit(controlQubit, indexUp); if (controlBit){ // state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo] stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp + u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo; stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp + u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo; // state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo] stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp + u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo; stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp + u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo; } } void statevec_controlledUnitary(Qureg qureg, int controlQubit, int targetQubit, ComplexMatrix2 u) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_controlledUnitaryKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, controlQubit, targetQubit, argifyMatrix2(u)); } __global__ void statevec_multiControlledUnitaryKernel( Qureg qureg, long long int ctrlQubitsMask, long long int ctrlFlipMask, int targetQubit, ArgMatrix2 u ){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp,stateRealLo, // storage for previous state values stateImagUp,stateImagLo; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity long long int numTasks=qureg.numAmpsPerChunk>>1; sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! 
fix -- no necessary for GPU version qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; if (ctrlQubitsMask == (ctrlQubitsMask & (indexUp ^ ctrlFlipMask))) { // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; // state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo] stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp + u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo; stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp + u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo; // state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo] stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp + u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo; stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp + u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo; } } void statevec_multiControlledUnitary( Qureg qureg, long long int ctrlQubitsMask, long long int ctrlFlipMask, int targetQubit, ComplexMatrix2 u ){ int threadsPerCUDABlock = 128; int CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_multiControlledUnitaryKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, ctrlQubitsMask, ctrlFlipMask, targetQubit, argifyMatrix2(u)); } __global__ void statevec_pauliXKernel(Qureg qureg, int targetQubit){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp, // storage for previous state values stateImagUp; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity long long int numTasks=qureg.numAmpsPerChunk>>1; sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! 
fix -- no necessary for GPU version qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateVecReal[indexUp] = stateVecReal[indexLo]; stateVecImag[indexUp] = stateVecImag[indexLo]; stateVecReal[indexLo] = stateRealUp; stateVecImag[indexLo] = stateImagUp; } void statevec_pauliX(Qureg qureg, int targetQubit) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_pauliXKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit); } __global__ void statevec_pauliYKernel(Qureg qureg, int targetQubit, int conjFac){ long long int sizeHalfBlock = 1LL << targetQubit; long long int sizeBlock = 2LL * sizeHalfBlock; long long int numTasks = qureg.numAmpsPerChunk >> 1; long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; long long int thisBlock = thisTask / sizeHalfBlock; long long int indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; long long int indexLo = indexUp + sizeHalfBlock; qreal stateRealUp, stateImagUp; qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; // update under +-{{0, -i}, {i, 0}} stateVecReal[indexUp] = conjFac * stateVecImag[indexLo]; stateVecImag[indexUp] = conjFac * -stateVecReal[indexLo]; stateVecReal[indexLo] = conjFac * -stateImagUp; stateVecImag[indexLo] = conjFac * stateRealUp; } void statevec_pauliY(Qureg qureg, int targetQubit) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_pauliYKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit, 1); } void statevec_pauliYConj(Qureg qureg, int targetQubit) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_pauliYKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit, -1); } __global__ void statevec_controlledPauliYKernel(Qureg qureg, int controlQubit, int targetQubit, int conjFac) { long long int index; long long int sizeBlock, sizeHalfBlock; long long int stateVecSize; int controlBit; qreal stateRealUp, stateImagUp; long long int thisBlock, indexUp, indexLo; sizeHalfBlock = 1LL << targetQubit; sizeBlock = 2LL * sizeHalfBlock; stateVecSize = qureg.numAmpsPerChunk; qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=(stateVecSize>>1)) return; thisBlock = index / sizeHalfBlock; indexUp = thisBlock*sizeBlock + index%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; controlBit = extractBit(controlQubit, indexUp); if (controlBit){ stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; // update under +-{{0, -i}, {i, 0}} stateVecReal[indexUp] = conjFac * stateVecImag[indexLo]; stateVecImag[indexUp] = conjFac * -stateVecReal[indexLo]; stateVecReal[indexLo] = 
conjFac * -stateImagUp; stateVecImag[indexLo] = conjFac * stateRealUp; } } void statevec_controlledPauliY(Qureg qureg, int controlQubit, int targetQubit) { int conjFactor = 1; int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_controlledPauliYKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, controlQubit, targetQubit, conjFactor); } void statevec_controlledPauliYConj(Qureg qureg, int controlQubit, int targetQubit) { int conjFactor = -1; int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_controlledPauliYKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, controlQubit, targetQubit, conjFactor); } __global__ void statevec_phaseShiftByTermKernel(Qureg qureg, int targetQubit, qreal cosAngle, qreal sinAngle) { long long int sizeBlock, sizeHalfBlock; long long int thisBlock, indexUp,indexLo; qreal stateRealLo, stateImagLo; long long int thisTask; long long int numTasks = qureg.numAmpsPerChunk >> 1; sizeHalfBlock = 1LL << targetQubit; sizeBlock = 2LL * sizeHalfBlock; qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; stateVecReal[indexLo] = cosAngle*stateRealLo - sinAngle*stateImagLo; stateVecImag[indexLo] = sinAngle*stateRealLo + cosAngle*stateImagLo; } void statevec_phaseShiftByTerm(Qureg qureg, int targetQubit, Complex term) { qreal cosAngle = term.real; qreal sinAngle = term.imag; int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_phaseShiftByTermKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit, cosAngle, sinAngle); } __global__ void statevec_controlledPhaseShiftKernel(Qureg qureg, int idQubit1, int idQubit2, qreal cosAngle, qreal sinAngle) { long long int index; long long int stateVecSize; int bit1, bit2; qreal stateRealLo, stateImagLo; stateVecSize = qureg.numAmpsPerChunk; qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; bit1 = extractBit (idQubit1, index); bit2 = extractBit (idQubit2, index); if (bit1 && bit2) { stateRealLo = stateVecReal[index]; stateImagLo = stateVecImag[index]; stateVecReal[index] = cosAngle*stateRealLo - sinAngle*stateImagLo; stateVecImag[index] = sinAngle*stateRealLo + cosAngle*stateImagLo; } } void statevec_controlledPhaseShift(Qureg qureg, int idQubit1, int idQubit2, qreal angle) { qreal cosAngle = cos(angle); qreal sinAngle = sin(angle); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_controlledPhaseShiftKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, idQubit1, idQubit2, cosAngle, sinAngle); } __global__ void statevec_multiControlledPhaseShiftKernel(Qureg qureg, long long int mask, qreal cosAngle, qreal sinAngle) { qreal stateRealLo, stateImagLo; long long int index; long long int 
stateVecSize; stateVecSize = qureg.numAmpsPerChunk; qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; if (mask == (mask & index) ){ stateRealLo = stateVecReal[index]; stateImagLo = stateVecImag[index]; stateVecReal[index] = cosAngle*stateRealLo - sinAngle*stateImagLo; stateVecImag[index] = sinAngle*stateRealLo + cosAngle*stateImagLo; } } void statevec_multiControlledPhaseShift(Qureg qureg, int *controlQubits, int numControlQubits, qreal angle) { qreal cosAngle = cos(angle); qreal sinAngle = sin(angle); long long int mask = getQubitBitMask(controlQubits, numControlQubits); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_multiControlledPhaseShiftKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, mask, cosAngle, sinAngle); } __global__ void statevec_multiRotateZKernel(Qureg qureg, long long int mask, qreal cosAngle, qreal sinAngle) { long long int stateVecSize = qureg.numAmpsPerChunk; long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; int fac = getBitMaskParity(mask & index)? -1 : 1; qreal stateReal = stateVecReal[index]; qreal stateImag = stateVecImag[index]; stateVecReal[index] = cosAngle*stateReal + fac * sinAngle*stateImag; stateVecImag[index] = - fac * sinAngle*stateReal + cosAngle*stateImag; } void statevec_multiRotateZ(Qureg qureg, long long int mask, qreal angle) { qreal cosAngle = cos(angle/2.0); qreal sinAngle = sin(angle/2.0); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_multiRotateZKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, mask, cosAngle, sinAngle); } __global__ void statevec_multiControlledMultiRotateZKernel(Qureg qureg, long long int ctrlMask, long long int targMask, qreal cosAngle, qreal sinAngle) { long long int stateVecSize = qureg.numAmpsPerChunk; long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; // amplitudes corresponding to control qubits not all-in-one are unmodified if (ctrlMask && ((ctrlMask & index) != ctrlMask)) return; qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; // avoid warp divergence, setting fac = +- 1 int fac = 1-2*getBitMaskParity(targMask & index); qreal stateReal = stateVecReal[index]; qreal stateImag = stateVecImag[index]; stateVecReal[index] = cosAngle*stateReal + fac * sinAngle*stateImag; stateVecImag[index] = - fac * sinAngle*stateReal + cosAngle*stateImag; } void statevec_multiControlledMultiRotateZ(Qureg qureg, long long int ctrlMask, long long int targMask, qreal angle) { qreal cosAngle = cos(angle/2.0); qreal sinAngle = sin(angle/2.0); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_multiControlledMultiRotateZKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, ctrlMask, targMask, cosAngle, sinAngle); } qreal densmatr_calcTotalProb(Qureg qureg) { // computes the trace using Kahan summation qreal pTotal=0; qreal y, t, c; c = 0; long long int numCols = 1LL << qureg.numQubitsRepresented; long long 
diagIndex; copyStateFromGPU(qureg); for (int col=0; col< numCols; col++) { diagIndex = col*(numCols + 1); y = qureg.stateVec.real[diagIndex] - c; t = pTotal + y; c = ( t - pTotal ) - y; // brackets are important pTotal = t; } return pTotal; } qreal statevec_calcTotalProb(Qureg qureg){ /* IJB - implemented using Kahan summation for greater accuracy at a slight floating point operation overhead. For more details see https://en.wikipedia.org/wiki/Kahan_summation_algorithm */ /* Don't change the bracketing in this routine! */ qreal pTotal=0; qreal y, t, c; long long int index; long long int numAmpsPerRank = qureg.numAmpsPerChunk; copyStateFromGPU(qureg); c = 0.0; for (index=0; index<numAmpsPerRank; index++){ /* Perform pTotal+=qureg.stateVec.real[index]*qureg.stateVec.real[index]; by Kahan */ // pTotal+=qureg.stateVec.real[index]*qureg.stateVec.real[index]; y = qureg.stateVec.real[index]*qureg.stateVec.real[index] - c; t = pTotal + y; c = ( t - pTotal ) - y; pTotal = t; /* Perform pTotal+=qureg.stateVec.imag[index]*qureg.stateVec.imag[index]; by Kahan */ //pTotal+=qureg.stateVec.imag[index]*qureg.stateVec.imag[index]; y = qureg.stateVec.imag[index]*qureg.stateVec.imag[index] - c; t = pTotal + y; c = ( t - pTotal ) - y; pTotal = t; } return pTotal; } __global__ void statevec_controlledPhaseFlipKernel(Qureg qureg, int idQubit1, int idQubit2) { long long int index; long long int stateVecSize; int bit1, bit2; stateVecSize = qureg.numAmpsPerChunk; qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; bit1 = extractBit (idQubit1, index); bit2 = extractBit (idQubit2, index); if (bit1 && bit2) { stateVecReal [index] = - stateVecReal [index]; stateVecImag [index] = - stateVecImag [index]; } } void statevec_controlledPhaseFlip(Qureg qureg, int idQubit1, int idQubit2) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_controlledPhaseFlipKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, idQubit1, idQubit2); } __global__ void statevec_multiControlledPhaseFlipKernel(Qureg qureg, long long int mask) { long long int index; long long int stateVecSize; stateVecSize = qureg.numAmpsPerChunk; qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; if (mask == (mask & index) ){ stateVecReal [index] = - stateVecReal [index]; stateVecImag [index] = - stateVecImag [index]; } } void statevec_multiControlledPhaseFlip(Qureg qureg, int *controlQubits, int numControlQubits) { int threadsPerCUDABlock, CUDABlocks; long long int mask = getQubitBitMask(controlQubits, numControlQubits); threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_multiControlledPhaseFlipKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, mask); } __global__ void statevec_swapQubitAmpsKernel(Qureg qureg, int qb1, int qb2) { qreal *reVec = qureg.deviceStateVec.real; qreal *imVec = qureg.deviceStateVec.imag; long long int numTasks = qureg.numAmpsPerChunk >> 2; // each iteration updates 2 amps and skips 2 amps long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; long long int ind00, ind01, ind10; qreal re01, re10, im01, im10; // determine ind00 of |..0..0..>, 
|..0..1..> and |..1..0..> ind00 = insertTwoZeroBits(thisTask, qb1, qb2); ind01 = flipBit(ind00, qb1); ind10 = flipBit(ind00, qb2); // extract statevec amplitudes re01 = reVec[ind01]; im01 = imVec[ind01]; re10 = reVec[ind10]; im10 = imVec[ind10]; // swap 01 and 10 amps reVec[ind01] = re10; reVec[ind10] = re01; imVec[ind01] = im10; imVec[ind10] = im01; } void statevec_swapQubitAmps(Qureg qureg, int qb1, int qb2) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>2)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_swapQubitAmpsKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, qb1, qb2); } __global__ void statevec_hadamardKernel (Qureg qureg, int targetQubit){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp,stateRealLo, // storage for previous state values stateImagUp,stateImagLo; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity long long int numTasks=qureg.numAmpsPerChunk>>1; sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! fix -- no necessary for GPU version qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; qreal recRoot2 = 1.0/sqrt(2.0); thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; stateVecReal[indexUp] = recRoot2*(stateRealUp + stateRealLo); stateVecImag[indexUp] = recRoot2*(stateImagUp + stateImagLo); stateVecReal[indexLo] = recRoot2*(stateRealUp - stateRealLo); stateVecImag[indexLo] = recRoot2*(stateImagUp - stateImagLo); } void statevec_hadamard(Qureg qureg, int targetQubit) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_hadamardKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit); } __global__ void statevec_controlledNotKernel(Qureg qureg, int controlQubit, int targetQubit) { long long int index; long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved long long int stateVecSize; int controlBit; // ----- temp variables qreal stateRealUp, // storage for previous state values stateImagUp; // (used in updates) long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks stateVecSize = qureg.numAmpsPerChunk; qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=(stateVecSize>>1)) return; thisBlock = index / sizeHalfBlock; indexUp = 
thisBlock*sizeBlock + index%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; controlBit = extractBit(controlQubit, indexUp); if (controlBit){ stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateVecReal[indexUp] = stateVecReal[indexLo]; stateVecImag[indexUp] = stateVecImag[indexLo]; stateVecReal[indexLo] = stateRealUp; stateVecImag[indexLo] = stateImagUp; } } void statevec_controlledNot(Qureg qureg, int controlQubit, int targetQubit) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_controlledNotKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, controlQubit, targetQubit); } __global__ void statevec_multiControlledMultiQubitNotKernel(Qureg qureg, int ctrlMask, int targMask) { qreal* stateRe = qureg.deviceStateVec.real; qreal* stateIm = qureg.deviceStateVec.imag; // althouugh each thread swaps/updates two amplitudes, we still invoke one thread per amp long long int ampInd = blockIdx.x*blockDim.x + threadIdx.x; if (ampInd >= qureg.numAmpsPerChunk) return; // modify amplitudes only if control qubits are 1 for this state if (ctrlMask && ((ctrlMask & ampInd) != ctrlMask)) return; long long int mateInd = ampInd ^ targMask; // if the mate is lower index, another thread is handling it if (mateInd < ampInd) return; /* it may seem wasteful to spawn more threads than are needed, and abort * half of them due to the amp pairing above (and potentially abort * an exponential number due to ctrlMask). however, since we are moving * global memory directly in a potentially non-contiguous fashoin, this * method is likely to be memory bandwidth bottlenecked anyway */ qreal mateRe = stateRe[mateInd]; qreal mateIm = stateIm[mateInd]; // swap amp with mate stateRe[mateInd] = stateRe[ampInd]; stateIm[mateInd] = stateIm[ampInd]; stateRe[ampInd] = mateRe; stateIm[ampInd] = mateIm; } void statevec_multiControlledMultiQubitNot(Qureg qureg, int ctrlMask, int targMask) { int numThreadsPerBlock = 128; int numBlocks = ceil(qureg.numAmpsPerChunk / (qreal) numThreadsPerBlock); hipLaunchKernelGGL(( statevec_multiControlledMultiQubitNotKernel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, qureg, ctrlMask, targMask); } __device__ __host__ unsigned int log2Int( unsigned int x ) { unsigned int ans = 0 ; while( x>>=1 ) ans++; return ans ; } __device__ void reduceBlock(qreal *arrayIn, qreal *reducedArray, int length){ int i, l, r; int threadMax, maxDepth; threadMax = length/2; maxDepth = log2Int(length/2); for (i=0; i<maxDepth+1; i++){ if (threadIdx.x<threadMax){ l = threadIdx.x; r = l + threadMax; arrayIn[l] = arrayIn[r] + arrayIn[l]; } threadMax = threadMax >> 1; __syncthreads(); // optimise -- use warp shuffle instead } if (threadIdx.x==0) reducedArray[blockIdx.x] = arrayIn[0]; } __global__ void copySharedReduceBlock(qreal*arrayIn, qreal *reducedArray, int length){ extern __shared__ qreal tempReductionArray[]; int blockOffset = blockIdx.x*length; tempReductionArray[threadIdx.x*2] = arrayIn[blockOffset + threadIdx.x*2]; tempReductionArray[threadIdx.x*2+1] = arrayIn[blockOffset + threadIdx.x*2+1]; __syncthreads(); reduceBlock(tempReductionArray, reducedArray, length); } __global__ void densmatr_findProbabilityOfZeroKernel( Qureg qureg, int measureQubit, qreal *reducedArray ) { // run by each thread // use of block here refers to contiguous amplitudes where measureQubit = 0, // (then =1) and NOT the CUDA block, which is the partitioning of CUDA threads long long int 
densityDim = 1LL << qureg.numQubitsRepresented; long long int numTasks = densityDim >> 1; long long int sizeHalfBlock = 1LL << (measureQubit); long long int sizeBlock = 2LL * sizeHalfBlock; long long int thisBlock; // which block this thread is processing long long int thisTask; // which part of the block this thread is processing long long int basisIndex; // index of this thread's computational basis state long long int densityIndex; // " " index of |basis><basis| in the flat density matrix // array of each thread's collected probability, to be summed extern __shared__ qreal tempReductionArray[]; // figure out which density matrix prob that this thread is assigned thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; basisIndex = thisBlock*sizeBlock + thisTask%sizeHalfBlock; densityIndex = (densityDim + 1) * basisIndex; // record the probability in the CUDA-BLOCK-wide array qreal prob = qureg.deviceStateVec.real[densityIndex]; // im[densityIndex] assumed ~ 0 tempReductionArray[threadIdx.x] = prob; // sum the probs collected by this CUDA-BLOCK's threads into a per-CUDA-BLOCK array __syncthreads(); if (threadIdx.x<blockDim.x/2){ reduceBlock(tempReductionArray, reducedArray, blockDim.x); } } __global__ void statevec_findProbabilityOfZeroKernel( Qureg qureg, int measureQubit, qreal *reducedArray ) { // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block index; // current index for first half block // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity long long int numTasks=qureg.numAmpsPerChunk>>1; // (good for shared memory parallelism) extern __shared__ qreal tempReductionArray[]; // ---------------------------------------------------------------- // // dimensions // // ---------------------------------------------------------------- // sizeHalfBlock = 1LL << (measureQubit); // number of state vector elements to sum, // and then the number to skip sizeBlock = 2LL * sizeHalfBlock; // size of blocks (pairs of measure and skip entries) // ---------------------------------------------------------------- // // find probability // // ---------------------------------------------------------------- // // // --- task-based shared-memory parallel implementation // qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; index = thisBlock*sizeBlock + thisTask%sizeHalfBlock; qreal realVal, imagVal; realVal = stateVecReal[index]; imagVal = stateVecImag[index]; tempReductionArray[threadIdx.x] = realVal*realVal + imagVal*imagVal; __syncthreads(); if (threadIdx.x<blockDim.x/2){ reduceBlock(tempReductionArray, reducedArray, blockDim.x); } } int getNumReductionLevels(long long int numValuesToReduce, int numReducedPerLevel){ int levels=0; while (numValuesToReduce){ numValuesToReduce = numValuesToReduce/numReducedPerLevel; levels++; } return levels; } void swapDouble(qreal **a, qreal **b){ qreal *temp; temp = *a; *a = *b; *b = temp; } qreal densmatr_findProbabilityOfZero(Qureg qureg, int measureQubit) { long long int densityDim = 1LL << qureg.numQubitsRepresented; long long int numValuesToReduce = densityDim >> 1; // half of the diagonal has measureQubit=0 int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int maxReducedPerLevel = 
REDUCE_SHARED_SIZE; int firstTime = 1; while (numValuesToReduce > 1) { // need less than one CUDA-BLOCK to reduce if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } // otherwise use only full CUDA-BLOCKS else { valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } sharedMemSize = valuesPerCUDABlock*sizeof(qreal); // spawn threads to sum the probs in each block if (firstTime) { hipLaunchKernelGGL(( densmatr_findProbabilityOfZeroKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0, qureg, measureQubit, qureg.firstLevelReduction); firstTime = 0; // sum the block probs } else { hipDeviceSynchronize(); hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0, qureg.firstLevelReduction, qureg.secondLevelReduction, valuesPerCUDABlock); hipDeviceSynchronize(); swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } qreal zeroProb; hipMemcpy(&zeroProb, qureg.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost); return zeroProb; } qreal statevec_findProbabilityOfZero(Qureg qureg, int measureQubit) { qreal stateProb=0; // 1-qubit edge-case breaks below loop logic if (qureg.numQubitsInStateVec == 1) { qreal amp; hipMemcpy(&amp, qureg.deviceStateVec.real, sizeof(qreal), hipMemcpyDeviceToHost); stateProb += amp*amp; hipMemcpy(&amp, qureg.deviceStateVec.imag, sizeof(qreal), hipMemcpyDeviceToHost); stateProb += amp*amp; return stateProb; } long long int numValuesToReduce = qureg.numAmpsPerChunk>>1; int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int firstTime=1; int maxReducedPerLevel = REDUCE_SHARED_SIZE; while(numValuesToReduce>1){ if (numValuesToReduce<maxReducedPerLevel){ // Need less than one CUDA block to reduce values valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } else { // Use full CUDA blocks, with block size constrained by shared mem usage valuesPerCUDABlock = maxReducedPerLevel; numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } sharedMemSize = valuesPerCUDABlock*sizeof(qreal); if (firstTime){ hipLaunchKernelGGL(( statevec_findProbabilityOfZeroKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0, qureg, measureQubit, qureg.firstLevelReduction); firstTime=0; } else { hipDeviceSynchronize(); hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0, qureg.firstLevelReduction, qureg.secondLevelReduction, valuesPerCUDABlock); hipDeviceSynchronize(); swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } hipMemcpy(&stateProb, qureg.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost); return stateProb; } qreal statevec_calcProbOfOutcome(Qureg qureg, int measureQubit, int outcome) { qreal outcomeProb = statevec_findProbabilityOfZero(qureg, measureQubit); if (outcome==1) outcomeProb = 1.0 - outcomeProb; return outcomeProb; } qreal densmatr_calcProbOfOutcome(Qureg qureg, int measureQubit, int outcome) { qreal outcomeProb = densmatr_findProbabilityOfZero(qureg, measureQubit); if (outcome==1) outcomeProb = 1.0 - outcomeProb; return outcomeProb; } // atomicAdd on floats/doubles isn't available on <6 CC devices, so we add it ourselves #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600 #else static __inline__ 
__device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*) address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return __longlong_as_double(old); } #endif __global__ void statevec_calcProbOfAllOutcomesKernel( qreal* outcomeProbs, Qureg qureg, int* qubits, int numQubits ) { // each thread handles one amplitude (all amplitudes are involved) long long int ampInd = blockIdx.x*blockDim.x + threadIdx.x; if (ampInd >= qureg.numAmpsTotal) return; qreal prob = ( qureg.deviceStateVec.real[ampInd]*qureg.deviceStateVec.real[ampInd] + qureg.deviceStateVec.imag[ampInd]*qureg.deviceStateVec.imag[ampInd]); // each amplitude contributes to one outcome long long int outcomeInd = 0; for (int q=0; q<numQubits; q++) outcomeInd += extractBit(qubits[q], ampInd) * (1LL << q); // each thread atomically writes directly to the global output. // this beat block-heirarchal atomic reductions in both global and shared memory! atomicAdd(&outcomeProbs[outcomeInd], prob); } void statevec_calcProbOfAllOutcomes(qreal* outcomeProbs, Qureg qureg, int* qubits, int numQubits) { // copy qubits to GPU memory int* d_qubits; size_t mem_qubits = numQubits * sizeof *d_qubits; hipMalloc(&d_qubits, mem_qubits); hipMemcpy(d_qubits, qubits, mem_qubits, hipMemcpyHostToDevice); // create one thread for every amplitude int numThreadsPerBlock = 128; int numBlocks = ceil(qureg.numAmpsPerChunk / (qreal) numThreadsPerBlock); // create global GPU array for outcomeProbs qreal* d_outcomeProbs; long long int numOutcomes = (1LL << numQubits); size_t mem_outcomeProbs = numOutcomes * sizeof *d_outcomeProbs; hipMalloc(&d_outcomeProbs, mem_outcomeProbs); hipMemset(d_outcomeProbs, 0, mem_outcomeProbs); // populate per-block subarrays hipLaunchKernelGGL(( statevec_calcProbOfAllOutcomesKernel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, d_outcomeProbs, qureg, d_qubits, numQubits); // copy outcomeProbs from GPU memory hipMemcpy(outcomeProbs, d_outcomeProbs, mem_outcomeProbs, hipMemcpyDeviceToHost); // free GPU memory hipFree(d_qubits); hipFree(d_outcomeProbs); } __global__ void densmatr_calcProbOfAllOutcomesKernel( qreal* outcomeProbs, Qureg qureg, int* qubits, int numQubits ) { // each thread handles one diagonal amplitude long long int diagInd = blockIdx.x*blockDim.x + threadIdx.x; long long int numDiags = (1LL << qureg.numQubitsRepresented); if (diagInd >= numDiags) return; long long int flatInd = (1 + numDiags)*diagInd; qreal prob = qureg.deviceStateVec.real[flatInd]; // im[flatInd] assumed ~ 0 // each diagonal amplitude contributes to one outcome long long int outcomeInd = 0; for (int q=0; q<numQubits; q++) outcomeInd += extractBit(qubits[q], diagInd) * (1LL << q); // each thread atomically writes directly to the global output. // this beat block-heirarchal atomic reductions in both global and shared memory! 
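/* Note: for a double-precision qreal this atomicAdd resolves to the hardware instruction on compute capability >= 6.0 (or to HIP's built-in overload on AMD), and otherwise to the CAS-based fallback defined above; a single-precision qreal always uses the native float atomicAdd. */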
atomicAdd(&outcomeProbs[outcomeInd], prob); } void densmatr_calcProbOfAllOutcomes(qreal* outcomeProbs, Qureg qureg, int* qubits, int numQubits) { // copy qubits to GPU memory int* d_qubits; size_t mem_qubits = numQubits * sizeof *d_qubits; hipMalloc(&d_qubits, mem_qubits); hipMemcpy(d_qubits, qubits, mem_qubits, hipMemcpyHostToDevice); // create global array, with per-block subarrays int numThreadsPerBlock = 128; int numDiags = (1LL << qureg.numQubitsRepresented); int numBlocks = ceil(numDiags / (qreal) numThreadsPerBlock); // create global GPU array for outcomeProbs qreal* d_outcomeProbs; long long int numOutcomes = (1LL << numQubits); size_t mem_outcomeProbs = numOutcomes * sizeof *d_outcomeProbs; hipMalloc(&d_outcomeProbs, mem_outcomeProbs); hipMemset(d_outcomeProbs, 0, mem_outcomeProbs); // populate per-block subarrays hipLaunchKernelGGL(( densmatr_calcProbOfAllOutcomesKernel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, d_outcomeProbs, qureg, d_qubits, numQubits); // copy outcomeProbs from GPU memory hipMemcpy(outcomeProbs, d_outcomeProbs, mem_outcomeProbs, hipMemcpyDeviceToHost); // free GPU memory hipFree(d_qubits); hipFree(d_outcomeProbs); } /** computes Tr(conjTrans(a) b) = sum of (a_ij^* b_ij), which is a real number */ __global__ void densmatr_calcInnerProductKernel( Qureg a, Qureg b, long long int numTermsToSum, qreal* reducedArray ) { long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index >= numTermsToSum) return; // Re{ conj(a) b } = Re{ (aRe - i aIm)(bRe + i bIm) } = aRe bRe + aIm bIm qreal prod = ( a.deviceStateVec.real[index]*b.deviceStateVec.real[index] + a.deviceStateVec.imag[index]*b.deviceStateVec.imag[index]); // array of each thread's collected sum term, to be summed extern __shared__ qreal tempReductionArray[]; tempReductionArray[threadIdx.x] = prod; __syncthreads(); // every second thread reduces if (threadIdx.x<blockDim.x/2) reduceBlock(tempReductionArray, reducedArray, blockDim.x); } qreal densmatr_calcInnerProduct(Qureg a, Qureg b) { // we're summing the square of every term in the density matrix long long int numValuesToReduce = a.numAmpsTotal; int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int maxReducedPerLevel = REDUCE_SHARED_SIZE; int firstTime = 1; while (numValuesToReduce > 1) { // need less than one CUDA-BLOCK to reduce if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } // otherwise use only full CUDA-BLOCKS else { valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } // dictates size of reduction array sharedMemSize = valuesPerCUDABlock*sizeof(qreal); // spawn threads to sum the terms in each block // arbitrarily store the reduction in the b qureg's array if (firstTime) { hipLaunchKernelGGL(( densmatr_calcInnerProductKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0, a, b, a.numAmpsTotal, b.firstLevelReduction); firstTime = 0; } // sum the block terms else { hipDeviceSynchronize(); hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0, b.firstLevelReduction, b.secondLevelReduction, valuesPerCUDABlock); hipDeviceSynchronize(); swapDouble(&(b.firstLevelReduction), &(b.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } qreal innerprod; hipMemcpy(&innerprod, b.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost); return innerprod; } /** computes either a real or imag term 
in the inner product */ __global__ void statevec_calcInnerProductKernel( int getRealComp, qreal* vecReal1, qreal* vecImag1, qreal* vecReal2, qreal* vecImag2, long long int numTermsToSum, qreal* reducedArray) { long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index >= numTermsToSum) return; // choose whether to calculate the real or imaginary term of the inner product qreal innerProdTerm; if (getRealComp) innerProdTerm = vecReal1[index]*vecReal2[index] + vecImag1[index]*vecImag2[index]; else innerProdTerm = vecReal1[index]*vecImag2[index] - vecImag1[index]*vecReal2[index]; // array of each thread's collected sum term, to be summed extern __shared__ qreal tempReductionArray[]; tempReductionArray[threadIdx.x] = innerProdTerm; __syncthreads(); // every second thread reduces if (threadIdx.x<blockDim.x/2) reduceBlock(tempReductionArray, reducedArray, blockDim.x); } /** Terrible code which unnecessarily individually computes and sums the real and imaginary components of the * inner product, so as to not have to worry about keeping the sums separated during reduction. * Truly disgusting, probably doubles runtime, please fix. * @todo could even do the kernel twice, storing real in bra.reduc and imag in ket.reduc? */ Complex statevec_calcInnerProduct(Qureg bra, Qureg ket) { qreal innerProdReal, innerProdImag; int getRealComp; long long int numValuesToReduce; int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int maxReducedPerLevel; int firstTime; // compute real component of inner product getRealComp = 1; numValuesToReduce = bra.numAmpsPerChunk; maxReducedPerLevel = REDUCE_SHARED_SIZE; firstTime = 1; while (numValuesToReduce > 1) { if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } else { valuesPerCUDABlock = maxReducedPerLevel; numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } sharedMemSize = valuesPerCUDABlock*sizeof(qreal); if (firstTime) { hipLaunchKernelGGL(( statevec_calcInnerProductKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0, getRealComp, bra.deviceStateVec.real, bra.deviceStateVec.imag, ket.deviceStateVec.real, ket.deviceStateVec.imag, numValuesToReduce, bra.firstLevelReduction); firstTime = 0; } else { hipDeviceSynchronize(); hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0, bra.firstLevelReduction, bra.secondLevelReduction, valuesPerCUDABlock); hipDeviceSynchronize(); swapDouble(&(bra.firstLevelReduction), &(bra.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } hipMemcpy(&innerProdReal, bra.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost); // compute imag component of inner product getRealComp = 0; numValuesToReduce = bra.numAmpsPerChunk; maxReducedPerLevel = REDUCE_SHARED_SIZE; firstTime = 1; while (numValuesToReduce > 1) { if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } else { valuesPerCUDABlock = maxReducedPerLevel; numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } sharedMemSize = valuesPerCUDABlock*sizeof(qreal); if (firstTime) { hipLaunchKernelGGL(( statevec_calcInnerProductKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0, getRealComp, bra.deviceStateVec.real, bra.deviceStateVec.imag, ket.deviceStateVec.real, ket.deviceStateVec.imag, numValuesToReduce, bra.firstLevelReduction); firstTime = 0; } else { hipDeviceSynchronize(); hipLaunchKernelGGL(( 
copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0, bra.firstLevelReduction, bra.secondLevelReduction, valuesPerCUDABlock); hipDeviceSynchronize(); swapDouble(&(bra.firstLevelReduction), &(bra.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } hipMemcpy(&innerProdImag, bra.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost); // return complex Complex innerProd; innerProd.real = innerProdReal; innerProd.imag = innerProdImag; return innerProd; } /** computes one term of (vec^*T) dens * vec */ __global__ void densmatr_calcFidelityKernel(Qureg dens, Qureg vec, long long int dim, qreal* reducedArray) { // figure out which density matrix row to consider long long int col; long long int row = blockIdx.x*blockDim.x + threadIdx.x; if (row >= dim) return; qreal* densReal = dens.deviceStateVec.real; qreal* densImag = dens.deviceStateVec.imag; qreal* vecReal = vec.deviceStateVec.real; qreal* vecImag = vec.deviceStateVec.imag; // compute the row-th element of the product dens*vec qreal prodReal = 0; qreal prodImag = 0; for (col=0LL; col < dim; col++) { qreal densElemReal = densReal[dim*col + row]; qreal densElemImag = densImag[dim*col + row]; prodReal += densElemReal*vecReal[col] - densElemImag*vecImag[col]; prodImag += densElemReal*vecImag[col] + densElemImag*vecReal[col]; } // multiply with row-th elem of (vec^*) qreal termReal = prodImag*vecImag[row] + prodReal*vecReal[row]; // imag of every term should be zero, because each is a valid fidelity calc of an eigenstate //qreal termImag = prodImag*vecReal[row] - prodReal*vecImag[row]; extern __shared__ qreal tempReductionArray[]; tempReductionArray[threadIdx.x] = termReal; __syncthreads(); // every second thread reduces if (threadIdx.x<blockDim.x/2) reduceBlock(tempReductionArray, reducedArray, blockDim.x); } qreal densmatr_calcFidelity(Qureg qureg, Qureg pureState) { // we're summing the square of every term in the density matrix long long int densityDim = 1LL << qureg.numQubitsRepresented; long long int numValuesToReduce = densityDim; int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int maxReducedPerLevel = REDUCE_SHARED_SIZE; int firstTime = 1; while (numValuesToReduce > 1) { // need less than one CUDA-BLOCK to reduce if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } // otherwise use only full CUDA-BLOCKS else { valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } // dictates size of reduction array sharedMemSize = valuesPerCUDABlock*sizeof(qreal); // spawn threads to sum the probs in each block // store the reduction in the pureState array if (firstTime) { hipLaunchKernelGGL(( densmatr_calcFidelityKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0, qureg, pureState, densityDim, pureState.firstLevelReduction); firstTime = 0; // sum the block probs } else { hipDeviceSynchronize(); hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0, pureState.firstLevelReduction, pureState.secondLevelReduction, valuesPerCUDABlock); hipDeviceSynchronize(); swapDouble(&(pureState.firstLevelReduction), &(pureState.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } qreal fidelity; hipMemcpy(&fidelity, pureState.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost); return fidelity; } __global__ void 
densmatr_calcHilbertSchmidtDistanceSquaredKernel( qreal* aRe, qreal* aIm, qreal* bRe, qreal* bIm, long long int numAmpsToSum, qreal *reducedArray ) { // figure out which density matrix term this thread is assigned long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index >= numAmpsToSum) return; // compute this thread's sum term qreal difRe = aRe[index] - bRe[index]; qreal difIm = aIm[index] - bIm[index]; qreal term = difRe*difRe + difIm*difIm; // array of each thread's collected term, to be summed extern __shared__ qreal tempReductionArray[]; tempReductionArray[threadIdx.x] = term; __syncthreads(); // every second thread reduces if (threadIdx.x<blockDim.x/2) reduceBlock(tempReductionArray, reducedArray, blockDim.x); } /* computes sqrt(Tr( (a-b) conjTrans(a-b) ) = sqrt( sum of abs vals of (a-b)) */ qreal densmatr_calcHilbertSchmidtDistance(Qureg a, Qureg b) { // we're summing the square of every term in (a-b) long long int numValuesToReduce = a.numAmpsPerChunk; int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int maxReducedPerLevel = REDUCE_SHARED_SIZE; int firstTime = 1; while (numValuesToReduce > 1) { // need less than one CUDA-BLOCK to reduce if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } // otherwise use only full CUDA-BLOCKS else { valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } // dictates size of reduction array sharedMemSize = valuesPerCUDABlock*sizeof(qreal); // spawn threads to sum the probs in each block (store reduction temp values in a's reduction array) if (firstTime) { hipLaunchKernelGGL(( densmatr_calcHilbertSchmidtDistanceSquaredKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0, a.deviceStateVec.real, a.deviceStateVec.imag, b.deviceStateVec.real, b.deviceStateVec.imag, numValuesToReduce, a.firstLevelReduction); firstTime = 0; // sum the block probs } else { hipDeviceSynchronize(); hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0, a.firstLevelReduction, a.secondLevelReduction, valuesPerCUDABlock); hipDeviceSynchronize(); swapDouble(&(a.firstLevelReduction), &(a.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } qreal trace; hipMemcpy(&trace, a.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost); qreal sqrtTrace = sqrt(trace); return sqrtTrace; } __global__ void densmatr_calcPurityKernel(qreal* vecReal, qreal* vecImag, long long int numAmpsToSum, qreal *reducedArray) { // figure out which density matrix term this thread is assigned long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index >= numAmpsToSum) return; qreal term = vecReal[index]*vecReal[index] + vecImag[index]*vecImag[index]; // array of each thread's collected probability, to be summed extern __shared__ qreal tempReductionArray[]; tempReductionArray[threadIdx.x] = term; __syncthreads(); // every second thread reduces if (threadIdx.x<blockDim.x/2) reduceBlock(tempReductionArray, reducedArray, blockDim.x); } /** Computes the trace of the density matrix squared */ qreal densmatr_calcPurity(Qureg qureg) { // we're summing the square of every term in the density matrix long long int numValuesToReduce = qureg.numAmpsPerChunk; int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int maxReducedPerLevel = REDUCE_SHARED_SIZE; int firstTime = 1; while (numValuesToReduce > 1) { // need less than one CUDA-BLOCK to reduce 
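/* Each pass of this loop reduces up to maxReducedPerLevel (REDUCE_SHARED_SIZE) values per CUDA block into one partial sum (first via densmatr_calcPurityKernel, thereafter via copySharedReduceBlock), so numValuesToReduce shrinks by that factor each pass until the single total Tr(rho^2) remains in qureg.firstLevelReduction. */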
if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } // otherwise use only full CUDA-BLOCKS else { valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } // dictates size of reduction array sharedMemSize = valuesPerCUDABlock*sizeof(qreal); // spawn threads to sum the probs in each block if (firstTime) { hipLaunchKernelGGL(( densmatr_calcPurityKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numValuesToReduce, qureg.firstLevelReduction); firstTime = 0; // sum the block probs } else { hipDeviceSynchronize(); hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0, qureg.firstLevelReduction, qureg.secondLevelReduction, valuesPerCUDABlock); hipDeviceSynchronize(); swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } qreal traceDensSquared; hipMemcpy(&traceDensSquared, qureg.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost); return traceDensSquared; } __global__ void statevec_collapseToKnownProbOutcomeKernel(Qureg qureg, int measureQubit, int outcome, qreal totalProbability) { // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block index; // current index for first half block // ----- measured probability qreal renorm; // probability (returned) value // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity // (good for shared memory parallelism) long long int numTasks=qureg.numAmpsPerChunk>>1; // ---------------------------------------------------------------- // // dimensions // // ---------------------------------------------------------------- // sizeHalfBlock = 1LL << (measureQubit); // number of state vector elements to sum, // and then the number to skip sizeBlock = 2LL * sizeHalfBlock; // size of blocks (pairs of measure and skip entries) // ---------------------------------------------------------------- // // find probability // // ---------------------------------------------------------------- // // // --- task-based shared-memory parallel implementation // renorm=1/sqrt(totalProbability); qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; index = thisBlock*sizeBlock + thisTask%sizeHalfBlock; if (outcome==0){ stateVecReal[index]=stateVecReal[index]*renorm; stateVecImag[index]=stateVecImag[index]*renorm; stateVecReal[index+sizeHalfBlock]=0; stateVecImag[index+sizeHalfBlock]=0; } else if (outcome==1){ stateVecReal[index]=0; stateVecImag[index]=0; stateVecReal[index+sizeHalfBlock]=stateVecReal[index+sizeHalfBlock]*renorm; stateVecImag[index+sizeHalfBlock]=stateVecImag[index+sizeHalfBlock]*renorm; } } /* * outcomeProb must accurately be the probability of that qubit outcome in the state-vector, or * else the state-vector will lose normalisation */ void statevec_collapseToKnownProbOutcome(Qureg qureg, int measureQubit, int outcome, qreal outcomeProb) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); hipLaunchKernelGGL(( 
statevec_collapseToKnownProbOutcomeKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, measureQubit, outcome, outcomeProb); } /** Maps thread ID to a |..0..><..0..| state and then locates |0><1|, |1><0| and |1><1| */ __global__ void densmatr_collapseToKnownProbOutcomeKernel( qreal outcomeProb, qreal* vecReal, qreal *vecImag, long long int numBasesToVisit, long long int part1, long long int part2, long long int part3, long long int rowBit, long long int colBit, long long int desired, long long int undesired) { long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x; if (scanInd >= numBasesToVisit) return; long long int base = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2); // renormalise desired outcome vecReal[base + desired] /= outcomeProb; vecImag[base + desired] /= outcomeProb; // kill undesired outcome vecReal[base + undesired] = 0; vecImag[base + undesired] = 0; // kill |..0..><..1..| states vecReal[base + colBit] = 0; vecImag[base + colBit] = 0; vecReal[base + rowBit] = 0; vecImag[base + rowBit] = 0; } /** This involves finding |...i...><...j...| states and killing those where i!=j */ void densmatr_collapseToKnownProbOutcome(Qureg qureg, int measureQubit, int outcome, qreal outcomeProb) { int rowQubit = measureQubit + qureg.numQubitsRepresented; int colBit = 1LL << measureQubit; int rowBit = 1LL << rowQubit; long long int numBasesToVisit = qureg.numAmpsPerChunk/4; long long int part1 = colBit -1; long long int part2 = (rowBit >> 1) - colBit; long long int part3 = numBasesToVisit - (rowBit >> 1); long long int desired, undesired; if (outcome == 0) { desired = 0; undesired = colBit | rowBit; } else { desired = colBit | rowBit; undesired = 0; } int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numBasesToVisit / (qreal) threadsPerCUDABlock); hipLaunchKernelGGL(( densmatr_collapseToKnownProbOutcomeKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, outcomeProb, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numBasesToVisit, part1, part2, part3, rowBit, colBit, desired, undesired); } __global__ void densmatr_mixDensityMatrixKernel(Qureg combineQureg, qreal otherProb, Qureg otherQureg, long long int numAmpsToVisit) { long long int ampInd = blockIdx.x*blockDim.x + threadIdx.x; if (ampInd >= numAmpsToVisit) return; combineQureg.deviceStateVec.real[ampInd] *= 1-otherProb; combineQureg.deviceStateVec.imag[ampInd] *= 1-otherProb; combineQureg.deviceStateVec.real[ampInd] += otherProb*otherQureg.deviceStateVec.real[ampInd]; combineQureg.deviceStateVec.imag[ampInd] += otherProb*otherQureg.deviceStateVec.imag[ampInd]; } void densmatr_mixDensityMatrix(Qureg combineQureg, qreal otherProb, Qureg otherQureg) { long long int numAmpsToVisit = combineQureg.numAmpsPerChunk; int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); hipLaunchKernelGGL(( densmatr_mixDensityMatrixKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, combineQureg, otherProb, otherQureg, numAmpsToVisit ); } /** Called once for every 4 amplitudes in density matrix * Works by establishing the |..0..><..0..| state (for its given index) then * visiting |..1..><..0..| and |..0..><..1..|. 
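 * (these are the two off-diagonal elements of the target qubit's 2x2 block; both are scaled by fac, while the diagonal |..0..><..0..| and |..1..><..1..| elements are left untouched)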
Labels |part1 X pa><rt2 NOT(X) part3| * From the brain of Simon Benjamin */ __global__ void densmatr_mixDephasingKernel( qreal fac, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit, long long int part1, long long int part2, long long int part3, long long int colBit, long long int rowBit) { long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x; if (scanInd >= numAmpsToVisit) return; long long int ampInd = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2); vecReal[ampInd + colBit] *= fac; vecImag[ampInd + colBit] *= fac; vecReal[ampInd + rowBit] *= fac; vecImag[ampInd + rowBit] *= fac; } void densmatr_oneQubitDegradeOffDiagonal(Qureg qureg, int targetQubit, qreal dephFac) { long long int numAmpsToVisit = qureg.numAmpsPerChunk/4; int rowQubit = targetQubit + qureg.numQubitsRepresented; long long int colBit = 1LL << targetQubit; long long int rowBit = 1LL << rowQubit; long long int part1 = colBit - 1; long long int part2 = (rowBit >> 1) - colBit; long long int part3 = numAmpsToVisit - (rowBit >> 1); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); hipLaunchKernelGGL(( densmatr_mixDephasingKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, dephFac, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit, part1, part2, part3, colBit, rowBit); } void densmatr_mixDephasing(Qureg qureg, int targetQubit, qreal dephase) { if (dephase == 0) return; qreal dephFac = 1 - dephase; densmatr_oneQubitDegradeOffDiagonal(qureg, targetQubit, dephFac); } /** Called 12 times for every 16 amplitudes in density matrix * Each sums from the |..0..0..><..0..0..| index to visit either * |..0..0..><..0..1..|, |..0..0..><..1..0..|, |..0..0..><..1..1..|, |..0..1..><..0..0..| * etc and so on to |..1..1..><..1..0|. Labels |part1 0 part2 0 par><t3 0 part4 0 part5|. * From the brain of Simon Benjamin */ __global__ void densmatr_mixTwoQubitDephasingKernel( qreal fac, qreal* vecReal, qreal *vecImag, long long int numBackgroundStates, long long int numAmpsToVisit, long long int part1, long long int part2, long long int part3, long long int part4, long long int part5, long long int colBit1, long long int rowBit1, long long int colBit2, long long int rowBit2) { long long int outerInd = blockIdx.x*blockDim.x + threadIdx.x; if (outerInd >= numAmpsToVisit) return; // sets meta in 1...14 excluding 5, 10, creating bit string DCBA for |..D..C..><..B..A| int meta = 1 + (outerInd/numBackgroundStates); if (meta > 4) meta++; if (meta > 9) meta++; long long int shift = rowBit2*((meta>>3)%2) + rowBit1*((meta>>2)%2) + colBit2*((meta>>1)%2) + colBit1*(meta%2); long long int scanInd = outerInd % numBackgroundStates; long long int stateInd = ( shift + (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2) + ((scanInd&part4)<<3) + ((scanInd&part5)<<4)); vecReal[stateInd] *= fac; vecImag[stateInd] *= fac; } // @TODO is separating these 12 amplitudes really faster than letting every 16th base modify 12 elems? 
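/* Note on the kernel above: writing the four target-qubit bits of the flat index as DCBA = (rowBit2, rowBit1, colBit2, colBit1), the 4 combinations whose row bits equal their column bits (0, 5, 10, 15) are never visited, since dephasing leaves those elements unchanged; meta enumerates the remaining 12, which is why the host function below visits 12 * numBackgroundStates amplitudes. */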
void densmatr_mixTwoQubitDephasing(Qureg qureg, int qubit1, int qubit2, qreal dephase) { if (dephase == 0) return; // assumes qubit2 > qubit1 int rowQubit1 = qubit1 + qureg.numQubitsRepresented; int rowQubit2 = qubit2 + qureg.numQubitsRepresented; long long int colBit1 = 1LL << qubit1; long long int rowBit1 = 1LL << rowQubit1; long long int colBit2 = 1LL << qubit2; long long int rowBit2 = 1LL << rowQubit2; long long int part1 = colBit1 - 1; long long int part2 = (colBit2 >> 1) - colBit1; long long int part3 = (rowBit1 >> 2) - (colBit2 >> 1); long long int part4 = (rowBit2 >> 3) - (rowBit1 >> 2); long long int part5 = (qureg.numAmpsPerChunk/16) - (rowBit2 >> 3); qreal dephFac = 1 - dephase; // refers to states |a 0 b 0 c><d 0 e 0 f| (target qubits are fixed) long long int numBackgroundStates = qureg.numAmpsPerChunk/16; // 12 of these states experience dephasing long long int numAmpsToVisit = 12 * numBackgroundStates; int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); hipLaunchKernelGGL(( densmatr_mixTwoQubitDephasingKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, dephFac, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numBackgroundStates, numAmpsToVisit, part1, part2, part3, part4, part5, colBit1, rowBit1, colBit2, rowBit2); } /** Works like mixDephasing but modifies every other element, and elements are averaged in pairs */ __global__ void densmatr_mixDepolarisingKernel( qreal depolLevel, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit, long long int part1, long long int part2, long long int part3, long long int bothBits) { long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x; if (scanInd >= numAmpsToVisit) return; long long int baseInd = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2); long long int targetInd = baseInd + bothBits; qreal realAvDepol = depolLevel * 0.5 * (vecReal[baseInd] + vecReal[targetInd]); qreal imagAvDepol = depolLevel * 0.5 * (vecImag[baseInd] + vecImag[targetInd]); vecReal[baseInd] *= 1 - depolLevel; vecImag[baseInd] *= 1 - depolLevel; vecReal[targetInd] *= 1 - depolLevel; vecImag[targetInd] *= 1 - depolLevel; vecReal[baseInd] += realAvDepol; vecImag[baseInd] += imagAvDepol; vecReal[targetInd] += realAvDepol; vecImag[targetInd] += imagAvDepol; } /** Works like mixDephasing but modifies every other element, and elements are averaged in pairs */ __global__ void densmatr_mixDampingKernel( qreal damping, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit, long long int part1, long long int part2, long long int part3, long long int bothBits) { long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x; if (scanInd >= numAmpsToVisit) return; long long int baseInd = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2); long long int targetInd = baseInd + bothBits; qreal realAvDepol = damping * ( vecReal[targetInd]); qreal imagAvDepol = damping * ( vecImag[targetInd]); vecReal[targetInd] *= 1 - damping; vecImag[targetInd] *= 1 - damping; vecReal[baseInd] += realAvDepol; vecImag[baseInd] += imagAvDepol; } void densmatr_mixDepolarising(Qureg qureg, int targetQubit, qreal depolLevel) { if (depolLevel == 0) return; densmatr_mixDephasing(qureg, targetQubit, depolLevel); long long int numAmpsToVisit = qureg.numAmpsPerChunk/4; int rowQubit = targetQubit + qureg.numQubitsRepresented; long long int colBit = 1LL << targetQubit; long long int rowBit = 1LL << rowQubit; long long int bothBits = colBit | rowBit; long long int part1 = 
colBit - 1; long long int part2 = (rowBit >> 1) - colBit; long long int part3 = numAmpsToVisit - (rowBit >> 1); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); hipLaunchKernelGGL(( densmatr_mixDepolarisingKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, depolLevel, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit, part1, part2, part3, bothBits); } void densmatr_mixDamping(Qureg qureg, int targetQubit, qreal damping) { if (damping == 0) return; qreal dephase = sqrt(1-damping); densmatr_oneQubitDegradeOffDiagonal(qureg, targetQubit, dephase); long long int numAmpsToVisit = qureg.numAmpsPerChunk/4; int rowQubit = targetQubit + qureg.numQubitsRepresented; long long int colBit = 1LL << targetQubit; long long int rowBit = 1LL << rowQubit; long long int bothBits = colBit | rowBit; long long int part1 = colBit - 1; long long int part2 = (rowBit >> 1) - colBit; long long int part3 = numAmpsToVisit - (rowBit >> 1); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); hipLaunchKernelGGL(( densmatr_mixDampingKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, damping, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit, part1, part2, part3, bothBits); } /** Called once for every 16 amplitudes */ __global__ void densmatr_mixTwoQubitDepolarisingKernel( qreal depolLevel, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit, long long int part1, long long int part2, long long int part3, long long int part4, long long int part5, long long int rowCol1, long long int rowCol2) { long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x; if (scanInd >= numAmpsToVisit) return; // index of |..0..0..><..0..0| long long int ind00 = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2) + ((scanInd&part4)<<3) + ((scanInd&part5)<<4); long long int ind01 = ind00 + rowCol1; long long int ind10 = ind00 + rowCol2; long long int ind11 = ind00 + rowCol1 + rowCol2; qreal realAvDepol = depolLevel * 0.25 * ( vecReal[ind00] + vecReal[ind01] + vecReal[ind10] + vecReal[ind11]); qreal imagAvDepol = depolLevel * 0.25 * ( vecImag[ind00] + vecImag[ind01] + vecImag[ind10] + vecImag[ind11]); qreal retain = 1 - depolLevel; vecReal[ind00] *= retain; vecImag[ind00] *= retain; vecReal[ind01] *= retain; vecImag[ind01] *= retain; vecReal[ind10] *= retain; vecImag[ind10] *= retain; vecReal[ind11] *= retain; vecImag[ind11] *= retain; vecReal[ind00] += realAvDepol; vecImag[ind00] += imagAvDepol; vecReal[ind01] += realAvDepol; vecImag[ind01] += imagAvDepol; vecReal[ind10] += realAvDepol; vecImag[ind10] += imagAvDepol; vecReal[ind11] += realAvDepol; vecImag[ind11] += imagAvDepol; } void densmatr_mixTwoQubitDepolarising(Qureg qureg, int qubit1, int qubit2, qreal depolLevel) { if (depolLevel == 0) return; // assumes qubit2 > qubit1 densmatr_mixTwoQubitDephasing(qureg, qubit1, qubit2, depolLevel); int rowQubit1 = qubit1 + qureg.numQubitsRepresented; int rowQubit2 = qubit2 + qureg.numQubitsRepresented; long long int colBit1 = 1LL << qubit1; long long int rowBit1 = 1LL << rowQubit1; long long int colBit2 = 1LL << qubit2; long long int rowBit2 = 1LL << rowQubit2; long long int rowCol1 = colBit1 | rowBit1; long long int rowCol2 = colBit2 | rowBit2; long long int numAmpsToVisit = qureg.numAmpsPerChunk/16; long long int part1 = colBit1 - 1; long long int part2 = (colBit2 >> 1) - colBit1; long long int part3 = (rowBit1 >> 2) - 
(colBit2 >> 1); long long int part4 = (rowBit2 >> 3) - (rowBit1 >> 2); long long int part5 = numAmpsToVisit - (rowBit2 >> 3); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); hipLaunchKernelGGL(( densmatr_mixTwoQubitDepolarisingKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, depolLevel, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit, part1, part2, part3, part4, part5, rowCol1, rowCol2); } __global__ void statevec_setWeightedQuregKernel(Complex fac1, Qureg qureg1, Complex fac2, Qureg qureg2, Complex facOut, Qureg out) { long long int ampInd = blockIdx.x*blockDim.x + threadIdx.x; long long int numAmpsToVisit = qureg1.numAmpsPerChunk; if (ampInd >= numAmpsToVisit) return; qreal *vecRe1 = qureg1.deviceStateVec.real; qreal *vecIm1 = qureg1.deviceStateVec.imag; qreal *vecRe2 = qureg2.deviceStateVec.real; qreal *vecIm2 = qureg2.deviceStateVec.imag; qreal *vecReOut = out.deviceStateVec.real; qreal *vecImOut = out.deviceStateVec.imag; qreal facRe1 = fac1.real; qreal facIm1 = fac1.imag; qreal facRe2 = fac2.real; qreal facIm2 = fac2.imag; qreal facReOut = facOut.real; qreal facImOut = facOut.imag; qreal re1,im1, re2,im2, reOut,imOut; long long int index = ampInd; re1 = vecRe1[index]; im1 = vecIm1[index]; re2 = vecRe2[index]; im2 = vecIm2[index]; reOut = vecReOut[index]; imOut = vecImOut[index]; vecReOut[index] = (facReOut*reOut - facImOut*imOut) + (facRe1*re1 - facIm1*im1) + (facRe2*re2 - facIm2*im2); vecImOut[index] = (facReOut*imOut + facImOut*reOut) + (facRe1*im1 + facIm1*re1) + (facRe2*im2 + facIm2*re2); } void statevec_setWeightedQureg(Complex fac1, Qureg qureg1, Complex fac2, Qureg qureg2, Complex facOut, Qureg out) { long long int numAmpsToVisit = qureg1.numAmpsPerChunk; int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_setWeightedQuregKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, fac1, qureg1, fac2, qureg2, facOut, out ); } __global__ void statevec_applyDiagonalOpKernel(Qureg qureg, DiagonalOp op) { // each thread modifies one value; a wasteful and inefficient strategy long long int numTasks = qureg.numAmpsPerChunk; long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask >= numTasks) return; qreal* stateRe = qureg.deviceStateVec.real; qreal* stateIm = qureg.deviceStateVec.imag; qreal* opRe = op.deviceOperator.real; qreal* opIm = op.deviceOperator.imag; qreal a = stateRe[thisTask]; qreal b = stateIm[thisTask]; qreal c = opRe[thisTask]; qreal d = opIm[thisTask]; // (a + b i)(c + d i) = (a c - b d) + i (a d + b c) stateRe[thisTask] = a*c - b*d; stateIm[thisTask] = a*d + b*c; } void statevec_applyDiagonalOp(Qureg qureg, DiagonalOp op) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_applyDiagonalOpKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, op); } __global__ void densmatr_applyDiagonalOpKernel(Qureg qureg, DiagonalOp op) { // each thread modifies one value; a wasteful and inefficient strategy long long int numTasks = qureg.numAmpsPerChunk; long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask >= numTasks) return; qreal* stateRe = qureg.deviceStateVec.real; qreal* stateIm = qureg.deviceStateVec.imag; qreal* opRe = op.deviceOperator.real; qreal* opIm = op.deviceOperator.imag; int opDim = 
(1 << op.numQubits); qreal a = stateRe[thisTask]; qreal b = stateIm[thisTask]; qreal c = opRe[thisTask % opDim]; qreal d = opIm[thisTask % opDim]; // (a + b i)(c + d i) = (a c - b d) + i (a d + b c) stateRe[thisTask] = a*c - b*d; stateIm[thisTask] = a*d + b*c; } void densmatr_applyDiagonalOp(Qureg qureg, DiagonalOp op) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( densmatr_applyDiagonalOpKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, op); } __global__ void statevec_applySubDiagonalOpKernel(Qureg qureg, int* targets, int numTargets, qreal* opReals, qreal* opImags, int conjFac) { long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=qureg.numAmpsPerChunk) return; long long int v = 0; for (int t=0; t<numTargets; t++) v |= extractBit(targets[t], index) << t; qreal elemRe = opReals[v]; qreal elemIm = opImags[v] * conjFac; qreal* stateRe = qureg.deviceStateVec.real; qreal* stateIm = qureg.deviceStateVec.imag; qreal ampRe = stateRe[index]; qreal ampIm = stateIm[index]; // (a + b i)(c + d i) = (a c - b d) + i (a d + b c) stateRe[index] = ampRe*elemRe - ampIm*elemIm; stateIm[index] = ampRe*elemIm + ampIm*elemRe; } void statevec_applySubDiagonalOp(Qureg qureg, int* targets, SubDiagonalOp op, int conj) { // copy targets to GPU memory int* d_targets; int numTargets = op.numQubits; size_t memTargets = numTargets * sizeof *d_targets; hipMalloc(&d_targets, memTargets); hipMemcpy(d_targets, targets, memTargets, hipMemcpyHostToDevice); // copy op to GPU memory qreal* d_opReal; qreal* d_opImag; size_t memOp = op.numElems * sizeof *d_opReal; hipMalloc(&d_opReal, memOp); hipMalloc(&d_opImag, memOp); hipMemcpy(d_opReal, op.real, memOp, hipMemcpyHostToDevice); hipMemcpy(d_opImag, op.imag, memOp, hipMemcpyHostToDevice); // determine factor of imaginary components int conjFac = 1; if (conj) conjFac = -1; // launch kernels int threadsPerCUDABlock = 128; int CUDABlocks = ceil(qureg.numAmpsPerChunk / (qreal) threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_applySubDiagonalOpKernel), dim3(CUDABlocks),dim3(threadsPerCUDABlock), 0, 0, qureg, d_targets, numTargets, d_opReal, d_opImag, conjFac); // free temporary GPU memory hipFree(d_targets); hipFree(d_opReal); hipFree(d_opImag); } /** computes either a real or imag term of |vec_i|^2 op_i */ __global__ void statevec_calcExpecDiagonalOpKernel( int getRealComp, qreal* vecReal, qreal* vecImag, qreal* opReal, qreal* opImag, long long int numTermsToSum, qreal* reducedArray) { long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index >= numTermsToSum) return; qreal vecAbs = vecReal[index]*vecReal[index] + vecImag[index]*vecImag[index]; // choose whether to calculate the real or imaginary term of the expec term qreal expecVal; if (getRealComp) expecVal = vecAbs * opReal[index]; else expecVal = vecAbs * opImag[index]; // array of each thread's collected sum term, to be summed extern __shared__ qreal tempReductionArray[]; tempReductionArray[threadIdx.x] = expecVal; __syncthreads(); // every second thread reduces if (threadIdx.x<blockDim.x/2) reduceBlock(tempReductionArray, reducedArray, blockDim.x); } Complex statevec_calcExpecDiagonalOp(Qureg qureg, DiagonalOp op) { /* @TODO: remove all this reduction boilerplate from QuEST GPU * (e.g. a func which accepts a pointer to do every-value reduction?) 
*/ qreal expecReal, expecImag; int getRealComp; long long int numValuesToReduce; int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int maxReducedPerLevel; int firstTime; // compute real component of inner product getRealComp = 1; numValuesToReduce = qureg.numAmpsPerChunk; maxReducedPerLevel = REDUCE_SHARED_SIZE; firstTime = 1; while (numValuesToReduce > 1) { if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } else { valuesPerCUDABlock = maxReducedPerLevel; numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } sharedMemSize = valuesPerCUDABlock*sizeof(qreal); if (firstTime) { hipLaunchKernelGGL(( statevec_calcExpecDiagonalOpKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0, getRealComp, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, op.deviceOperator.real, op.deviceOperator.imag, numValuesToReduce, qureg.firstLevelReduction); firstTime = 0; } else { hipDeviceSynchronize(); hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0, qureg.firstLevelReduction, qureg.secondLevelReduction, valuesPerCUDABlock); hipDeviceSynchronize(); swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } hipMemcpy(&expecReal, qureg.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost); // compute imag component of inner product getRealComp = 0; numValuesToReduce = qureg.numAmpsPerChunk; maxReducedPerLevel = REDUCE_SHARED_SIZE; firstTime = 1; while (numValuesToReduce > 1) { if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } else { valuesPerCUDABlock = maxReducedPerLevel; numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } sharedMemSize = valuesPerCUDABlock*sizeof(qreal); if (firstTime) { hipLaunchKernelGGL(( statevec_calcExpecDiagonalOpKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0, getRealComp, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, op.deviceOperator.real, op.deviceOperator.imag, numValuesToReduce, qureg.firstLevelReduction); firstTime = 0; } else { hipDeviceSynchronize(); hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0, qureg.firstLevelReduction, qureg.secondLevelReduction, valuesPerCUDABlock); hipDeviceSynchronize(); swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } hipMemcpy(&expecImag, qureg.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost); // return complex Complex expecVal; expecVal.real = expecReal; expecVal.imag = expecImag; return expecVal; } __global__ void densmatr_calcExpecDiagonalOpKernel( int getRealComp, qreal* matReal, qreal* matImag, qreal* opReal, qreal* opImag, int numQubits, long long int numTermsToSum, qreal* reducedArray) { /** if the thread represents a diagonal op, then it computes either a * real or imag term of matr_{ii} op_i. 
Otherwise, it writes a 0 to the * reduction array */ // index will identy one of the 2^Q diagonals to be summed long long int matInd = blockIdx.x*blockDim.x + threadIdx.x; if (matInd >= numTermsToSum) return; long long int diagSpacing = (1LL << numQubits) + 1LL; int isDiag = ((matInd % diagSpacing) == 0); long long int opInd = matInd / diagSpacing; qreal val = 0; if (isDiag) { qreal matRe = matReal[matInd]; qreal matIm = matImag[matInd]; qreal opRe = opReal[opInd]; qreal opIm = opImag[opInd]; // (matRe + matIm i)(opRe + opIm i) = // (matRe opRe - matIm opIm) + i (matRe opIm + matIm opRe) if (getRealComp) val = matRe * opRe - matIm * opIm; else val = matRe * opIm + matIm * opRe; } // array of each thread's collected sum term, to be summed extern __shared__ qreal tempReductionArray[]; tempReductionArray[threadIdx.x] = val; __syncthreads(); // every second thread reduces if (threadIdx.x<blockDim.x/2) reduceBlock(tempReductionArray, reducedArray, blockDim.x); } Complex densmatr_calcExpecDiagonalOp(Qureg qureg, DiagonalOp op) { /* @TODO: remove all this reduction boilerplate from QuEST GPU * (e.g. a func which accepts a pointer to do every-value reduction?) */ qreal expecReal, expecImag; int getRealComp; long long int numValuesToReduce; int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int maxReducedPerLevel; int firstTime; // compute real component of inner product getRealComp = 1; numValuesToReduce = qureg.numAmpsPerChunk; maxReducedPerLevel = REDUCE_SHARED_SIZE; firstTime = 1; while (numValuesToReduce > 1) { if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } else { valuesPerCUDABlock = maxReducedPerLevel; numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } sharedMemSize = valuesPerCUDABlock*sizeof(qreal); if (firstTime) { hipLaunchKernelGGL(( densmatr_calcExpecDiagonalOpKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0, getRealComp, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, op.deviceOperator.real, op.deviceOperator.imag, op.numQubits, numValuesToReduce, qureg.firstLevelReduction); firstTime = 0; } else { hipDeviceSynchronize(); hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0, qureg.firstLevelReduction, qureg.secondLevelReduction, valuesPerCUDABlock); hipDeviceSynchronize(); swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } hipMemcpy(&expecReal, qureg.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost); // compute imag component of inner product getRealComp = 0; numValuesToReduce = qureg.numAmpsPerChunk; maxReducedPerLevel = REDUCE_SHARED_SIZE; firstTime = 1; while (numValuesToReduce > 1) { if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } else { valuesPerCUDABlock = maxReducedPerLevel; numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } sharedMemSize = valuesPerCUDABlock*sizeof(qreal); if (firstTime) { hipLaunchKernelGGL(( densmatr_calcExpecDiagonalOpKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0, getRealComp, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, op.deviceOperator.real, op.deviceOperator.imag, op.numQubits, numValuesToReduce, qureg.firstLevelReduction); firstTime = 0; } else { hipDeviceSynchronize(); hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0, 
qureg.firstLevelReduction, qureg.secondLevelReduction, valuesPerCUDABlock); hipDeviceSynchronize(); swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } hipMemcpy(&expecImag, qureg.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost); // return complex Complex expecVal; expecVal.real = expecReal; expecVal.imag = expecImag; return expecVal; } void agnostic_setDiagonalOpElems(DiagonalOp op, long long int startInd, qreal* real, qreal* imag, long long int numElems) { // update both RAM and VRAM, for consistency memcpy(&op.real[startInd], real, numElems * sizeof(qreal)); memcpy(&op.imag[startInd], imag, numElems * sizeof(qreal)); hipDeviceSynchronize(); hipMemcpy( op.deviceOperator.real + startInd, real, numElems * sizeof(*(op.deviceOperator.real)), hipMemcpyHostToDevice); hipMemcpy( op.deviceOperator.imag + startInd, imag, numElems * sizeof(*(op.deviceOperator.imag)), hipMemcpyHostToDevice); } __global__ void statevec_applyPhaseFuncOverridesKernel( Qureg qureg, int* qubits, int numQubits, enum bitEncoding encoding, qreal* coeffs, qreal* exponents, int numTerms, long long int* overrideInds, qreal* overridePhases, int numOverrides, int conj ) { long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=qureg.numAmpsPerChunk) return; // determine global amplitude index (non-distributed, so it's just local index) long long int globalAmpInd = index; // determine phase index of {qubits} long long int phaseInd = 0LL; if (encoding == UNSIGNED) { for (int q=0; q<numQubits; q++) phaseInd += (1LL << q) * extractBit(qubits[q], globalAmpInd); } else if (encoding == TWOS_COMPLEMENT) { for (int q=0; q<numQubits-1; q++) // use final qubit to indicate sign phaseInd += (1LL << q) * extractBit(qubits[q], globalAmpInd); if (extractBit(qubits[numQubits-1], globalAmpInd) == 1) phaseInd -= (1LL << (numQubits-1)); } // determine if this phase index has an overriden value (i < numOverrides) int i; for (i=0; i<numOverrides; i++) if (phaseInd == overrideInds[i]) break; // determine phase from {coeffs}, {exponents} (unless overriden) qreal phase = 0; if (i < numOverrides) phase = overridePhases[i]; else for (int t=0; t<numTerms; t++) phase += coeffs[t] * pow((qreal) phaseInd, (qreal) exponents[t]); // negate phase to conjugate operator if (conj) phase *= -1; // modify amp to amp * exp(i phase) qreal c = cos(phase); qreal s = sin(phase); qreal re = qureg.deviceStateVec.real[index]; qreal im = qureg.deviceStateVec.imag[index]; // = {re[amp] cos(phase) - im[amp] sin(phase)} + i {re[amp] sin(phase) + im[amp] cos(phase)} qureg.deviceStateVec.real[index] = re*c - im*s; qureg.deviceStateVec.imag[index] = re*s + im*c; } void statevec_applyPhaseFuncOverrides( Qureg qureg, int* qubits, int numQubits, enum bitEncoding encoding, qreal* coeffs, qreal* exponents, int numTerms, long long int* overrideInds, qreal* overridePhases, int numOverrides, int conj ) { // allocate device space for global list of {qubits}, {coeffs}, {exponents}, {overrideInds} and {overridePhases} int* d_qubits; size_t mem_qubits = numQubits * sizeof *d_qubits; qreal* d_coeffs; size_t mem_terms = numTerms * sizeof *d_coeffs; qreal* d_exponents; long long int* d_overrideInds; size_t mem_inds = numOverrides * sizeof *d_overrideInds; qreal* d_overridePhases; size_t mem_phas = numOverrides * sizeof *d_overridePhases; hipMalloc(&d_qubits, mem_qubits); hipMemcpy(d_qubits, qubits, mem_qubits, hipMemcpyHostToDevice); hipMalloc(&d_coeffs, mem_terms); hipMemcpy(d_coeffs, 
coeffs, mem_terms, hipMemcpyHostToDevice); hipMalloc(&d_exponents, mem_terms); hipMemcpy(d_exponents, exponents, mem_terms, hipMemcpyHostToDevice); hipMalloc(&d_overrideInds, mem_inds); hipMemcpy(d_overrideInds, overrideInds, mem_inds, hipMemcpyHostToDevice); hipMalloc(&d_overridePhases,mem_phas); hipMemcpy(d_overridePhases, overridePhases, mem_phas, hipMemcpyHostToDevice); // call kernel int threadsPerCUDABlock = 128; int CUDABlocks = ceil((qreal) qureg.numAmpsPerChunk / threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_applyPhaseFuncOverridesKernel), dim3(CUDABlocks),dim3(threadsPerCUDABlock), 0, 0, qureg, d_qubits, numQubits, encoding, d_coeffs, d_exponents, numTerms, d_overrideInds, d_overridePhases, numOverrides, conj); // cleanup device memory hipFree(d_qubits); hipFree(d_coeffs); hipFree(d_exponents); hipFree(d_overrideInds); hipFree(d_overridePhases); } __global__ void statevec_applyMultiVarPhaseFuncOverridesKernel( Qureg qureg, int* qubits, int* numQubitsPerReg, int numRegs, enum bitEncoding encoding, qreal* coeffs, qreal* exponents, int* numTermsPerReg, long long int* overrideInds, qreal* overridePhases, int numOverrides, long long int *phaseInds, int conj ) { long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=qureg.numAmpsPerChunk) return; // determine global amplitude index (non-distributed, so it's just local index) long long int globalAmpInd = index; /* * each thread needs to write to a local: * long long int phaseInds[numRegs]; * but instead has access to shared array phaseInds, with below stride and offset */ size_t stride = gridDim.x*blockDim.x; size_t offset = blockIdx.x*blockDim.x + threadIdx.x; // determine phase indices int flatInd = 0; if (encoding == UNSIGNED) { for (int r=0; r<numRegs; r++) { phaseInds[r*stride+offset] = 0LL; for (int q=0; q<numQubitsPerReg[r]; q++) phaseInds[r*stride+offset] += (1LL << q) * extractBit(qubits[flatInd++], globalAmpInd); } } else if (encoding == TWOS_COMPLEMENT) { for (int r=0; r<numRegs; r++) { phaseInds[r*stride+offset] = 0LL; for (int q=0; q<numQubitsPerReg[r]-1; q++) phaseInds[r*stride+offset] += (1LL << q) * extractBit(qubits[flatInd++], globalAmpInd); // use final qubit to indicate sign if (extractBit(qubits[flatInd++], globalAmpInd) == 1) phaseInds[r*stride+offset] -= (1LL << (numQubitsPerReg[r]-1)); } } // determine if this phase index has an overriden value (i < numOverrides) int i; for (i=0; i<numOverrides; i++) { int found = 1; for (int r=0; r<numRegs; r++) { if (phaseInds[r*stride+offset] != overrideInds[i*numRegs+r]) { found = 0; break; } } if (found) break; } // compute the phase (unless overriden) qreal phase = 0; if (i < numOverrides) phase = overridePhases[i]; else { flatInd = 0; for (int r=0; r<numRegs; r++) { for (int t=0; t<numTermsPerReg[r]; t++) { phase += coeffs[flatInd] * pow((qreal) phaseInds[r*stride+offset], (qreal) exponents[flatInd]); flatInd++; } } } // negate phase to conjugate operator if (conj) phase *= -1; // modify amp to amp * exp(i phase) qreal c = cos(phase); qreal s = sin(phase); qreal re = qureg.deviceStateVec.real[index]; qreal im = qureg.deviceStateVec.imag[index]; // = {re[amp] cos(phase) - im[amp] sin(phase)} + i {re[amp] sin(phase) + im[amp] cos(phase)} qureg.deviceStateVec.real[index] = re*c - im*s; qureg.deviceStateVec.imag[index] = re*s + im*c; } void statevec_applyMultiVarPhaseFuncOverrides( Qureg qureg, int* qubits, int* numQubitsPerReg, int numRegs, enum bitEncoding encoding, qreal* coeffs, qreal* exponents, int* numTermsPerReg, long long int* overrideInds, qreal* 
overridePhases, int numOverrides, int conj ) { // determine size of arrays, for cloning into GPU memory size_t mem_numQubitsPerReg = numRegs * sizeof *numQubitsPerReg; size_t mem_numTermsPerReg = numRegs * sizeof *numTermsPerReg; size_t mem_overridePhases = numOverrides * sizeof *overridePhases; size_t mem_overrideInds = numOverrides * numRegs * sizeof *overrideInds; size_t mem_qubits = 0; size_t mem_coeffs = 0; size_t mem_exponents = 0; for (int r=0; r<numRegs; r++) { mem_qubits += numQubitsPerReg[r] * sizeof *qubits; mem_coeffs += numTermsPerReg[r] * sizeof *coeffs; mem_exponents += numTermsPerReg[r] * sizeof *exponents; } // allocate global GPU memory int* d_qubits; hipMalloc(&d_qubits, mem_qubits); qreal* d_coeffs; hipMalloc(&d_coeffs, mem_coeffs); qreal* d_exponents; hipMalloc(&d_exponents, mem_exponents); int* d_numQubitsPerReg; hipMalloc(&d_numQubitsPerReg, mem_numQubitsPerReg); int* d_numTermsPerReg; hipMalloc(&d_numTermsPerReg, mem_numTermsPerReg); long long int* d_overrideInds; hipMalloc(&d_overrideInds, mem_overrideInds); qreal* d_overridePhases; hipMalloc(&d_overridePhases, mem_overridePhases); // copy function args into GPU memory hipMemcpy(d_qubits, qubits, mem_qubits, hipMemcpyHostToDevice); hipMemcpy(d_coeffs, coeffs, mem_coeffs, hipMemcpyHostToDevice); hipMemcpy(d_exponents, exponents, mem_exponents, hipMemcpyHostToDevice); hipMemcpy(d_numQubitsPerReg, numQubitsPerReg, mem_numQubitsPerReg, hipMemcpyHostToDevice); hipMemcpy(d_numTermsPerReg, numTermsPerReg, mem_numTermsPerReg, hipMemcpyHostToDevice); hipMemcpy(d_overrideInds, overrideInds, mem_overrideInds, hipMemcpyHostToDevice); hipMemcpy(d_overridePhases, overridePhases, mem_overridePhases, hipMemcpyHostToDevice); int threadsPerCUDABlock = 128; int CUDABlocks = ceil((qreal) qureg.numAmpsPerChunk / threadsPerCUDABlock); // allocate thread-local working space {phaseInds} long long int *d_phaseInds; size_t gridSize = (size_t) threadsPerCUDABlock * CUDABlocks; hipMalloc(&d_phaseInds, numRegs*gridSize * sizeof *d_phaseInds); // call kernel hipLaunchKernelGGL(( statevec_applyMultiVarPhaseFuncOverridesKernel), dim3(CUDABlocks),dim3(threadsPerCUDABlock), 0, 0, qureg, d_qubits, d_numQubitsPerReg, numRegs, encoding, d_coeffs, d_exponents, d_numTermsPerReg, d_overrideInds, d_overridePhases, numOverrides, d_phaseInds, conj); // free device memory hipFree(d_qubits); hipFree(d_coeffs); hipFree(d_exponents); hipFree(d_numQubitsPerReg); hipFree(d_numTermsPerReg); hipFree(d_overrideInds); hipFree(d_overridePhases); hipFree(d_phaseInds); } __global__ void statevec_applyParamNamedPhaseFuncOverridesKernel( Qureg qureg, int* qubits, int* numQubitsPerReg, int numRegs, enum bitEncoding encoding, enum phaseFunc phaseFuncName, qreal* params, int numParams, long long int* overrideInds, qreal* overridePhases, int numOverrides, long long int* phaseInds, int conj ) { long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=qureg.numAmpsPerChunk) return; // determine global amplitude index (non-distributed, so it's just local index) long long int globalAmpInd = index; /* * each thread needs to write to a local: * long long int phaseInds[numRegs]; * but instead has access to shared array phaseInds, with below stride and offset */ size_t stride = gridDim.x*blockDim.x; size_t offset = blockIdx.x*blockDim.x + threadIdx.x; // determine phase indices if (encoding == UNSIGNED) { int flatInd = 0; for (int r=0; r<numRegs; r++) { phaseInds[r*stride+offset] = 0LL; for (int q=0; q<numQubitsPerReg[r]; q++) phaseInds[r*stride+offset] += (1LL << q) * 
extractBit(qubits[flatInd++], globalAmpInd); } } else if (encoding == TWOS_COMPLEMENT) { int flatInd = 0; for (int r=0; r<numRegs; r++) { phaseInds[r*stride+offset] = 0LL; for (int q=0; q<numQubitsPerReg[r]-1; q++) phaseInds[r*stride+offset] += (1LL << q) * extractBit(qubits[flatInd++], globalAmpInd); // use final qubit to indicate sign if (extractBit(qubits[flatInd++], globalAmpInd) == 1) phaseInds[r*stride+offset] -= (1LL << (numQubitsPerReg[r]-1)); } } // determine if this phase index has an overriden value (i < numOverrides) int i; for (i=0; i<numOverrides; i++) { int found = 1; for (int r=0; r<numRegs; r++) { if (phaseInds[r*stride+offset] != overrideInds[i*numRegs+r]) { found = 0; break; } } if (found) break; } // compute the phase (unless overriden) qreal phase = 0; if (i < numOverrides) phase = overridePhases[i]; else { // compute norm related phases if (phaseFuncName == NORM || phaseFuncName == INVERSE_NORM || phaseFuncName == SCALED_NORM || phaseFuncName == SCALED_INVERSE_NORM || phaseFuncName == SCALED_INVERSE_SHIFTED_NORM) { qreal norm = 0; if (phaseFuncName == SCALED_INVERSE_SHIFTED_NORM) { for (int r=0; r<numRegs; r++) { qreal dif = phaseInds[r*stride+offset] - params[2+r]; norm += dif*dif; } } else for (int r=0; r<numRegs; r++) norm += phaseInds[r*stride+offset]*phaseInds[r*stride+offset]; norm = sqrt(norm); if (phaseFuncName == NORM) phase = norm; else if (phaseFuncName == INVERSE_NORM) phase = (norm == 0.)? params[0] : 1/norm; // smallest non-zero norm is 1 else if (phaseFuncName == SCALED_NORM) phase = params[0] * norm; else if (phaseFuncName == SCALED_INVERSE_NORM || phaseFuncName == SCALED_INVERSE_SHIFTED_NORM) phase = (norm <= REAL_EPS)? params[1] : params[0] / norm; // unless shifted closer to zero } // compute product related phases else if (phaseFuncName == PRODUCT || phaseFuncName == INVERSE_PRODUCT || phaseFuncName == SCALED_PRODUCT || phaseFuncName == SCALED_INVERSE_PRODUCT) { qreal prod = 1; for (int r=0; r<numRegs; r++) prod *= phaseInds[r*stride+offset]; if (phaseFuncName == PRODUCT) phase = prod; else if (phaseFuncName == INVERSE_PRODUCT) phase = (prod == 0.)? params[0] : 1/prod; // smallest non-zero prod is +- 1 else if (phaseFuncName == SCALED_PRODUCT) phase = params[0] * prod; else if (phaseFuncName == SCALED_INVERSE_PRODUCT) phase = (prod == 0.)? params[1] : params[0] / prod; } // compute Euclidean distance related phases else if (phaseFuncName == DISTANCE || phaseFuncName == INVERSE_DISTANCE || phaseFuncName == SCALED_DISTANCE || phaseFuncName == SCALED_INVERSE_DISTANCE || phaseFuncName == SCALED_INVERSE_SHIFTED_DISTANCE || phaseFuncName == SCALED_INVERSE_SHIFTED_WEIGHTED_DISTANCE) { qreal dist = 0; if (phaseFuncName == SCALED_INVERSE_SHIFTED_DISTANCE) { for (int r=0; r<numRegs; r+=2) { qreal dif = (phaseInds[r*stride+offset] - phaseInds[(r+1)*stride+offset] - params[2+r/2]); dist += dif*dif; } } else if (phaseFuncName == SCALED_INVERSE_SHIFTED_WEIGHTED_DISTANCE) { for (int r=0; r<numRegs; r+=2) { qreal dif = (phaseInds[r*stride+offset] - phaseInds[(r+1)*stride+offset] - params[2+r+1]); dist += params[2+r] * dif*dif; } } else for (int r=0; r<numRegs; r+=2) { qreal dif = (phaseInds[(r+1)*stride+offset] - phaseInds[r*stride+offset]); dist += dif*dif; } // if sqrt() arg would be negative, set it to divergence param if (dist < 0) dist = 0; dist = sqrt(dist); if (phaseFuncName == DISTANCE) phase = dist; else if (phaseFuncName == INVERSE_DISTANCE) phase = (dist == 0.)? 
params[0] : 1/dist; // smallest non-zero dist is 1 else if (phaseFuncName == SCALED_DISTANCE) phase = params[0] * dist; else if (phaseFuncName == SCALED_INVERSE_DISTANCE || phaseFuncName == SCALED_INVERSE_SHIFTED_DISTANCE || phaseFuncName == SCALED_INVERSE_SHIFTED_WEIGHTED_DISTANCE) phase = (dist <= REAL_EPS)? params[1] : params[0] / dist; // unless shifted closer } } // negate phase to conjugate operator if (conj) phase *= -1; // modify amp to amp * exp(i phase) qreal c = cos(phase); qreal s = sin(phase); qreal re = qureg.deviceStateVec.real[index]; qreal im = qureg.deviceStateVec.imag[index]; // = {re[amp] cos(phase) - im[amp] sin(phase)} + i {re[amp] sin(phase) + im[amp] cos(phase)} qureg.deviceStateVec.real[index] = re*c - im*s; qureg.deviceStateVec.imag[index] = re*s + im*c; } void statevec_applyParamNamedPhaseFuncOverrides( Qureg qureg, int* qubits, int* numQubitsPerReg, int numRegs, enum bitEncoding encoding, enum phaseFunc phaseFuncName, qreal* params, int numParams, long long int* overrideInds, qreal* overridePhases, int numOverrides, int conj ) { // determine size of arrays, for cloning into GPU memory size_t mem_numQubitsPerReg = numRegs * sizeof *numQubitsPerReg; size_t mem_overridePhases = numOverrides * sizeof *overridePhases; size_t mem_overrideInds = numOverrides * numRegs * sizeof *overrideInds; size_t mem_params = numParams * sizeof *params; size_t mem_qubits = 0; for (int r=0; r<numRegs; r++) mem_qubits += numQubitsPerReg[r] * sizeof *qubits; // allocate global GPU memory int* d_qubits; hipMalloc(&d_qubits, mem_qubits); int* d_numQubitsPerReg; hipMalloc(&d_numQubitsPerReg, mem_numQubitsPerReg); long long int* d_overrideInds; hipMalloc(&d_overrideInds, mem_overrideInds); qreal* d_overridePhases; hipMalloc(&d_overridePhases, mem_overridePhases); qreal* d_params = NULL; if (numParams > 0) hipMalloc(&d_params, mem_params); // copy function args into GPU memory hipMemcpy(d_qubits, qubits, mem_qubits, hipMemcpyHostToDevice); hipMemcpy(d_numQubitsPerReg, numQubitsPerReg, mem_numQubitsPerReg, hipMemcpyHostToDevice); hipMemcpy(d_overrideInds, overrideInds, mem_overrideInds, hipMemcpyHostToDevice); hipMemcpy(d_overridePhases, overridePhases, mem_overridePhases, hipMemcpyHostToDevice); if (numParams > 0) hipMemcpy(d_params, params, mem_params, hipMemcpyHostToDevice); int threadsPerCUDABlock = 128; int CUDABlocks = ceil((qreal) qureg.numAmpsPerChunk / threadsPerCUDABlock); // allocate thread-local working space {phaseInds} long long int *d_phaseInds; size_t gridSize = (size_t) threadsPerCUDABlock * CUDABlocks; hipMalloc(&d_phaseInds, numRegs*gridSize * sizeof *d_phaseInds); // call kernel hipLaunchKernelGGL(( statevec_applyParamNamedPhaseFuncOverridesKernel), dim3(CUDABlocks),dim3(threadsPerCUDABlock), 0, 0, qureg, d_qubits, d_numQubitsPerReg, numRegs, encoding, phaseFuncName, d_params, numParams, d_overrideInds, d_overridePhases, numOverrides, d_phaseInds, conj); // free device memory hipFree(d_qubits); hipFree(d_numQubitsPerReg); hipFree(d_overrideInds); hipFree(d_overridePhases); hipFree(d_phaseInds); if (numParams > 0) hipFree(d_params); } __global__ void densmatr_setQuregToPauliHamilKernel( Qureg qureg, enum pauliOpType* pauliCodes, qreal* termCoeffs, int numSumTerms ) { long long int n = blockIdx.x*blockDim.x + threadIdx.x; if (n>=qureg.numAmpsPerChunk) return; // flattened {I,X,Y,Z} matrix elements, where [k] = [p][i][j] const int pauliRealElems[] = { 1,0, 0,1, 0,1, 1,0, 0,0, 0,0, 1,0, 0,-1 }; const int pauliImagElems[] = { 0,0, 0,0, 0,0, 0,0, 0,-1,1,0, 0,0, 0,0 }; // |n> = 
|c>|r> const int numQubits = qureg.numQubitsRepresented; const long long int r = n & ((1LL << numQubits) - 1); const long long int c = n >> numQubits; // new amplitude of |n> qreal elemRe = 0; qreal elemIm = 0; for (long long int t=0; t<numSumTerms; t++) { // pauliKronecker[r][c] = prod_q Pauli[q][q-th bit of r and c] int kronRe = 1; int kronIm = 0; long long int pInd = t * numQubits; for (int q=0; q<numQubits; q++) { // get element of Pauli matrix int i = (r >> q) & 1; int j = (c >> q) & 1; int p = (int) pauliCodes[pInd++]; int k = (p<<2) + (i<<1) + j; int pauliRe = pauliRealElems[k]; int pauliIm = pauliImagElems[k]; // kron *= pauli int tmp = (pauliRe*kronRe) - (pauliIm*kronIm); kronIm = (pauliRe*kronIm) + (pauliIm*kronRe); kronRe = tmp; } // elem = sum_t coeffs[t] pauliKronecker[r][c] elemRe += termCoeffs[t] * kronRe; elemIm += termCoeffs[t] * kronIm; } // overwrite the density matrix entry qureg.deviceStateVec.real[n] = elemRe; qureg.deviceStateVec.imag[n] = elemIm; } void densmatr_setQuregToPauliHamil(Qureg qureg, PauliHamil hamil) { // copy hamil into GPU memory enum pauliOpType* d_pauliCodes; size_t mem_pauliCodes = hamil.numSumTerms * hamil.numQubits * sizeof *d_pauliCodes; hipMalloc(&d_pauliCodes, mem_pauliCodes); hipMemcpy(d_pauliCodes, hamil.pauliCodes, mem_pauliCodes, hipMemcpyHostToDevice); qreal* d_termCoeffs; size_t mem_termCoeffs = hamil.numSumTerms * sizeof *d_termCoeffs; hipMalloc(&d_termCoeffs, mem_termCoeffs); hipMemcpy(d_termCoeffs, hamil.termCoeffs, mem_termCoeffs, hipMemcpyHostToDevice); int numThreadsPerBlock = 128; int numBlocks = ceil(qureg.numAmpsPerChunk / (qreal) numThreadsPerBlock); hipLaunchKernelGGL(( densmatr_setQuregToPauliHamilKernel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, qureg, d_pauliCodes, d_termCoeffs, hamil.numSumTerms); // free tmp GPU memory hipFree(d_pauliCodes); hipFree(d_termCoeffs); } void seedQuEST(QuESTEnv *env, unsigned long int *seedArray, int numSeeds) { // free existing seed array, if exists if (env->seeds != NULL) free(env->seeds); // record keys in permanent heap env->seeds = (unsigned long int*) malloc(numSeeds * sizeof *(env->seeds)); for (int i=0; i<numSeeds; i++) (env->seeds)[i] = seedArray[i]; env->numSeeds = numSeeds; // pass keys to Mersenne Twister seeder init_by_array(seedArray, numSeeds); } #ifdef __cplusplus } #endif
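/*
 * Usage sketch (illustration only; excluded from compilation via #if 0).
 * This is not part of the QuEST backend above -- it is a minimal, hedged example of how
 * the internal routines defined in this file compose on a single GPU: allocate a register
 * and a DiagonalOp, load the operator elements (agnostic_setDiagonalOpElems copies them to
 * both RAM and VRAM, so no extra sync is needed), then evaluate <psi| D |psi> with
 * statevec_calcExpecDiagonalOp, which runs the two-level GPU reduction implemented above.
 * The helper name exampleDiagonalExpectation and the chosen operator elements are
 * illustrative assumptions; real applications reach these routines through the public
 * QuEST API rather than calling the backend directly.
 */
#if 0
static Complex exampleDiagonalExpectation(int numQubits) {

    QuESTEnv env = createQuESTEnv();

    // prepare |+>...|+> in GPU memory
    Qureg psi;
    statevec_createQureg(&psi, numQubits, env);
    statevec_initPlusState(psi);

    // diagonal operator with (purely real) elements d_i = i
    DiagonalOp op = agnostic_createDiagonalOp(numQubits, env);
    long long int dim = 1LL << numQubits;
    qreal* re = (qreal*) calloc(dim, sizeof(qreal));
    qreal* im = (qreal*) calloc(dim, sizeof(qreal));
    for (long long int i=0; i<dim; i++)
        re[i] = (qreal) i;
    agnostic_setDiagonalOpElems(op, 0, re, im, dim);

    // <psi| D |psi>; for |+>^n and real d_i this is just the average of the d_i
    Complex expec = statevec_calcExpecDiagonalOp(psi, op);

    // free host and device resources
    free(re);
    free(im);
    agnostic_destroyDiagonalOp(op);
    statevec_destroyQureg(psi, env);
    destroyQuESTEnv(env);

    return expec;
}
#endif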
e45246684cebdb926777c8e7b4ec2cf4bd132148.cu
// Distributed under MIT licence. See https://github.com/QuEST-Kit/QuEST/blob/master/LICENCE.txt for details /** @file * An implementation of the backend in ../QuEST_internal.h for a GPU environment. * * @author Ania Brown * @author Tyson Jones */ # include "QuEST.h" # include "QuEST_precision.h" # include "QuEST_validation.h" # include "QuEST_internal.h" // purely to resolve getQuESTDefaultSeedKey # include "mt19937ar.h" # include <stdlib.h> # include <stdio.h> # include <math.h> #ifdef USE_HIP // Translate CUDA calls into HIP calls #include "cuda_to_hip.h" #endif # define REDUCE_SHARED_SIZE 512 # define DEBUG 0 /* * struct types for concisely passing unitaries to kernels */ // hide these from doxygen /// \cond HIDDEN_SYMBOLS typedef struct ArgMatrix2 { Complex r0c0, r0c1; Complex r1c0, r1c1; } ArgMatrix2; typedef struct ArgMatrix4 { Complex r0c0, r0c1, r0c2, r0c3; Complex r1c0, r1c1, r1c2, r1c3; Complex r2c0, r2c1, r2c2, r2c3; Complex r3c0, r3c1, r3c2, r3c3; } ArgMatrix4; ArgMatrix2 argifyMatrix2(ComplexMatrix2 m) { ArgMatrix2 a; a.r0c0.real=m.real[0][0]; a.r0c0.imag=m.imag[0][0]; a.r0c1.real=m.real[0][1]; a.r0c1.imag=m.imag[0][1]; a.r1c0.real=m.real[1][0]; a.r1c0.imag=m.imag[1][0]; a.r1c1.real=m.real[1][1]; a.r1c1.imag=m.imag[1][1]; return a; } ArgMatrix4 argifyMatrix4(ComplexMatrix4 m) { ArgMatrix4 a; a.r0c0.real=m.real[0][0]; a.r0c0.imag=m.imag[0][0]; a.r0c1.real=m.real[0][1]; a.r0c1.imag=m.imag[0][1]; a.r0c2.real=m.real[0][2]; a.r0c2.imag=m.imag[0][2]; a.r0c3.real=m.real[0][3]; a.r0c3.imag=m.imag[0][3]; a.r1c0.real=m.real[1][0]; a.r1c0.imag=m.imag[1][0]; a.r1c1.real=m.real[1][1]; a.r1c1.imag=m.imag[1][1]; a.r1c2.real=m.real[1][2]; a.r1c2.imag=m.imag[1][2]; a.r1c3.real=m.real[1][3]; a.r1c3.imag=m.imag[1][3]; a.r2c0.real=m.real[2][0]; a.r2c0.imag=m.imag[2][0]; a.r2c1.real=m.real[2][1]; a.r2c1.imag=m.imag[2][1]; a.r2c2.real=m.real[2][2]; a.r2c2.imag=m.imag[2][2]; a.r2c3.real=m.real[2][3]; a.r2c3.imag=m.imag[2][3]; a.r3c0.real=m.real[3][0]; a.r3c0.imag=m.imag[3][0]; a.r3c1.real=m.real[3][1]; a.r3c1.imag=m.imag[3][1]; a.r3c2.real=m.real[3][2]; a.r3c2.imag=m.imag[3][2]; a.r3c3.real=m.real[3][3]; a.r3c3.imag=m.imag[3][3]; return a; } /// \endcond /* * in-kernel bit twiddling functions */ __forceinline__ __device__ int extractBit (const int locationOfBitFromRight, const long long int theEncodedNumber) { return (theEncodedNumber & ( 1LL << locationOfBitFromRight )) >> locationOfBitFromRight; } __forceinline__ __device__ int getBitMaskParity(long long int mask) { int parity = 0; while (mask) { parity = !parity; mask = mask & (mask-1); } return parity; } __forceinline__ __device__ long long int flipBit(const long long int number, const int bitInd) { return (number ^ (1LL << bitInd)); } __forceinline__ __device__ long long int insertZeroBit(const long long int number, const int index) { long long int left, right; left = (number >> index) << index; right = number - left; return (left << 1) ^ right; } __forceinline__ __device__ long long int insertTwoZeroBits(const long long int number, const int bit1, const int bit2) { int small = (bit1 < bit2)? bit1 : bit2; int big = (bit1 < bit2)? bit2 : bit1; return insertZeroBit(insertZeroBit(number, small), big); } __forceinline__ __device__ long long int insertZeroBits(long long int number, int* inds, const int numInds) { /* inserted bit inds must strictly increase, so that their final indices are correct. 
* in-lieu of sorting (avoided since no C++ variable-size arrays, and since we're already * memory bottle-necked so overhead eats this slowdown), we find the next-smallest index each * at each insert. recall every element of inds (a positive or zero number) is unique. * This function won't appear in the CPU code, which can use C99 variable-size arrays and * ought to make a sorted array before threading */ int curMin = inds[0]; int prevMin = -1; for (int n=0; n < numInds; n++) { // find next min for (int t=0; t < numInds; t++) if (inds[t]>prevMin && inds[t]<curMin) curMin = inds[t]; number = insertZeroBit(number, curMin); // set curMin to an arbitrary non-visited elem prevMin = curMin; for (int t=0; t < numInds; t++) if (inds[t] > curMin) { curMin = inds[t]; break; } } return number; } /* * state vector and density matrix operations */ #ifdef __cplusplus extern "C" { #endif void statevec_setAmps(Qureg qureg, long long int startInd, qreal* reals, qreal* imags, long long int numAmps) { cudaDeviceSynchronize(); cudaMemcpy( qureg.deviceStateVec.real + startInd, reals, numAmps * sizeof(*(qureg.deviceStateVec.real)), cudaMemcpyHostToDevice); cudaMemcpy( qureg.deviceStateVec.imag + startInd, imags, numAmps * sizeof(*(qureg.deviceStateVec.imag)), cudaMemcpyHostToDevice); } /** works for both statevectors and density matrices */ void statevec_cloneQureg(Qureg targetQureg, Qureg copyQureg) { // copy copyQureg's GPU statevec to targetQureg's GPU statevec cudaDeviceSynchronize(); cudaMemcpy( targetQureg.deviceStateVec.real, copyQureg.deviceStateVec.real, targetQureg.numAmpsPerChunk*sizeof(*(targetQureg.deviceStateVec.real)), cudaMemcpyDeviceToDevice); cudaMemcpy( targetQureg.deviceStateVec.imag, copyQureg.deviceStateVec.imag, targetQureg.numAmpsPerChunk*sizeof(*(targetQureg.deviceStateVec.imag)), cudaMemcpyDeviceToDevice); } __global__ void densmatr_initPureStateKernel( long long int numPureAmps, qreal *targetVecReal, qreal *targetVecImag, qreal *copyVecReal, qreal *copyVecImag) { // this is a particular index of the pure copyQureg long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=numPureAmps) return; qreal realRow = copyVecReal[index]; qreal imagRow = copyVecImag[index]; for (long long int col=0; col < numPureAmps; col++) { qreal realCol = copyVecReal[col]; qreal imagCol = - copyVecImag[col]; // minus for conjugation targetVecReal[col*numPureAmps + index] = realRow*realCol - imagRow*imagCol; targetVecImag[col*numPureAmps + index] = realRow*imagCol + imagRow*realCol; } } void densmatr_initPureState(Qureg targetQureg, Qureg copyQureg) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(copyQureg.numAmpsPerChunk)/threadsPerCUDABlock); densmatr_initPureStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>( copyQureg.numAmpsPerChunk, targetQureg.deviceStateVec.real, targetQureg.deviceStateVec.imag, copyQureg.deviceStateVec.real, copyQureg.deviceStateVec.imag); } __global__ void densmatr_initPlusStateKernel(long long int stateVecSize, qreal probFactor, qreal *stateVecReal, qreal *stateVecImag){ long long int index; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; stateVecReal[index] = probFactor; stateVecImag[index] = 0.0; } void densmatr_initPlusState(Qureg qureg) { qreal probFactor = 1.0/((qreal) (1LL << qureg.numQubitsRepresented)); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); densmatr_initPlusStateKernel<<<CUDABlocks, 
threadsPerCUDABlock>>>( qureg.numAmpsPerChunk, probFactor, qureg.deviceStateVec.real, qureg.deviceStateVec.imag); } __global__ void densmatr_initClassicalStateKernel( long long int densityNumElems, qreal *densityReal, qreal *densityImag, long long int densityInd) { // initialise the state to all zeros long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index >= densityNumElems) return; densityReal[index] = 0.0; densityImag[index] = 0.0; if (index==densityInd){ // classical state has probability 1 densityReal[densityInd] = 1.0; densityImag[densityInd] = 0.0; } } void densmatr_initClassicalState(Qureg qureg, long long int stateInd) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); // index of the desired state in the flat density matrix long long int densityDim = 1LL << qureg.numQubitsRepresented; long long int densityInd = (densityDim + 1)*stateInd; // identical to pure version densmatr_initClassicalStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>( qureg.numAmpsPerChunk, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, densityInd); } void statevec_createQureg(Qureg *qureg, int numQubits, QuESTEnv env) { // allocate CPU memory long long int numAmps = 1L << numQubits; long long int numAmpsPerRank = numAmps/env.numRanks; qureg->stateVec.real = (qreal*) malloc(numAmpsPerRank * sizeof(qureg->stateVec.real)); qureg->stateVec.imag = (qreal*) malloc(numAmpsPerRank * sizeof(qureg->stateVec.imag)); if (env.numRanks>1){ qureg->pairStateVec.real = (qreal*) malloc(numAmpsPerRank * sizeof(qureg->pairStateVec.real)); qureg->pairStateVec.imag = (qreal*) malloc(numAmpsPerRank * sizeof(qureg->pairStateVec.imag)); } qureg->numQubitsInStateVec = numQubits; qureg->numAmpsPerChunk = numAmpsPerRank; qureg->numAmpsTotal = numAmps; qureg->chunkId = env.rank; qureg->numChunks = env.numRanks; qureg->isDensityMatrix = 0; // check cpu memory allocation was successful validateQuregAllocation(qureg, env, __func__); // allocate GPU memory cudaMalloc(&(qureg->deviceStateVec.real), qureg->numAmpsPerChunk*sizeof(*(qureg->deviceStateVec.real))); cudaMalloc(&(qureg->deviceStateVec.imag), qureg->numAmpsPerChunk*sizeof(*(qureg->deviceStateVec.imag))); cudaMalloc(&(qureg->firstLevelReduction), ceil(qureg->numAmpsPerChunk/(qreal)REDUCE_SHARED_SIZE)*sizeof(qreal)); cudaMalloc(&(qureg->secondLevelReduction), ceil(qureg->numAmpsPerChunk/(qreal)(REDUCE_SHARED_SIZE*REDUCE_SHARED_SIZE))* sizeof(qreal)); // check gpu memory allocation was successful validateQuregGPUAllocation(qureg, env, __func__); } void statevec_destroyQureg(Qureg qureg, QuESTEnv env) { // Free CPU memory free(qureg.stateVec.real); free(qureg.stateVec.imag); if (env.numRanks>1){ free(qureg.pairStateVec.real); free(qureg.pairStateVec.imag); } // Free GPU memory cudaFree(qureg.deviceStateVec.real); cudaFree(qureg.deviceStateVec.imag); cudaFree(qureg.firstLevelReduction); cudaFree(qureg.secondLevelReduction); } DiagonalOp agnostic_createDiagonalOp(int numQubits, QuESTEnv env) { DiagonalOp op; op.numQubits = numQubits; op.numElemsPerChunk = (1LL << numQubits) / env.numRanks; op.chunkId = env.rank; op.numChunks = env.numRanks; // allocate CPU memory (initialised to zero) op.real = (qreal*) calloc(op.numElemsPerChunk, sizeof(qreal)); op.imag = (qreal*) calloc(op.numElemsPerChunk, sizeof(qreal)); // @TODO no handling of rank>1 allocation (no distributed GPU) // check cpu memory allocation was successful validateDiagonalOpAllocation(&op, env, __func__); // allocate GPU memory 
size_t arrSize = op.numElemsPerChunk * sizeof(qreal); cudaMalloc(&(op.deviceOperator.real), arrSize); cudaMalloc(&(op.deviceOperator.imag), arrSize); // check gpu memory allocation was successful validateDiagonalOpGPUAllocation(&op, env, __func__); // initialise GPU memory to zero cudaMemset(op.deviceOperator.real, 0, arrSize); cudaMemset(op.deviceOperator.imag, 0, arrSize); return op; } void agnostic_destroyDiagonalOp(DiagonalOp op) { free(op.real); free(op.imag); cudaFree(op.deviceOperator.real); cudaFree(op.deviceOperator.imag); } void agnostic_syncDiagonalOp(DiagonalOp op) { cudaDeviceSynchronize(); size_t mem_elems = op.numElemsPerChunk * sizeof *op.real; cudaMemcpy(op.deviceOperator.real, op.real, mem_elems, cudaMemcpyHostToDevice); cudaMemcpy(op.deviceOperator.imag, op.imag, mem_elems, cudaMemcpyHostToDevice); } __global__ void agnostic_initDiagonalOpFromPauliHamilKernel( DiagonalOp op, enum pauliOpType* pauliCodes, qreal* termCoeffs, int numSumTerms ) { // each thread processes one diagonal element long long int elemInd = blockIdx.x*blockDim.x + threadIdx.x; if (elemInd >= op.numElemsPerChunk) return; qreal elem = 0; // elem is (+-) every coefficient, with sign determined by parity for (int t=0; t<numSumTerms; t++) { // determine the parity of the Z-targeted qubits in the element's corresponding state int isOddNumOnes = 0; for (int q=0; q<op.numQubits; q++) if (pauliCodes[q + t*op.numQubits] == PAULI_Z) if (extractBit(q, elemInd)) isOddNumOnes = !isOddNumOnes; // avoid warp divergence int sign = 1 - 2*isOddNumOnes; // (-1 if isOddNumOnes, else +1) elem += termCoeffs[t] * sign; } op.deviceOperator.real[elemInd] = elem; op.deviceOperator.imag[elemInd] = 0; } void agnostic_initDiagonalOpFromPauliHamil(DiagonalOp op, PauliHamil hamil) { // copy args intop GPU memory enum pauliOpType* d_pauliCodes; size_t mem_pauliCodes = hamil.numSumTerms * op.numQubits * sizeof *d_pauliCodes; cudaMalloc(&d_pauliCodes, mem_pauliCodes); cudaMemcpy(d_pauliCodes, hamil.pauliCodes, mem_pauliCodes, cudaMemcpyHostToDevice); qreal* d_termCoeffs; size_t mem_termCoeffs = hamil.numSumTerms * sizeof *d_termCoeffs; cudaMalloc(&d_termCoeffs, mem_termCoeffs); cudaMemcpy(d_termCoeffs, hamil.termCoeffs, mem_termCoeffs, cudaMemcpyHostToDevice); int numThreadsPerBlock = 128; int numBlocks = ceil(op.numElemsPerChunk / (qreal) numThreadsPerBlock); agnostic_initDiagonalOpFromPauliHamilKernel<<<numBlocks, numThreadsPerBlock>>>( op, d_pauliCodes, d_termCoeffs, hamil.numSumTerms); // copy populated operator into to RAM cudaDeviceSynchronize(); size_t mem_elems = op.numElemsPerChunk * sizeof *op.real; cudaMemcpy(op.real, op.deviceOperator.real, mem_elems, cudaMemcpyDeviceToHost); cudaMemcpy(op.imag, op.deviceOperator.imag, mem_elems, cudaMemcpyDeviceToHost); cudaFree(d_pauliCodes); cudaFree(d_termCoeffs); } int GPUExists(void){ int deviceCount, device; int gpuDeviceCount = 0; struct cudaDeviceProp properties; cudaError_t cudaResultCode = cudaGetDeviceCount(&deviceCount); if (cudaResultCode != cudaSuccess) deviceCount = 0; /* machines with no GPUs can still report one emulation device */ for (device = 0; device < deviceCount; ++device) { cudaGetDeviceProperties(&properties, device); if (properties.major != 9999) { /* 9999 means emulation only */ ++gpuDeviceCount; } } if (gpuDeviceCount) return 1; else return 0; } QuESTEnv createQuESTEnv(void) { validateGPUExists(GPUExists(), __func__); QuESTEnv env; env.rank=0; env.numRanks=1; env.seeds = NULL; env.numSeeds = 0; seedQuESTDefault(&env); return env; } void syncQuESTEnv(QuESTEnv 
env){ cudaDeviceSynchronize(); } int syncQuESTSuccess(int successCode){ return successCode; } void destroyQuESTEnv(QuESTEnv env){ free(env.seeds); } void reportQuESTEnv(QuESTEnv env){ printf("EXECUTION ENVIRONMENT:\n"); printf("Running locally on one node with GPU\n"); printf("Number of ranks is %d\n", env.numRanks); # ifdef _OPENMP printf("OpenMP enabled\n"); printf("Number of threads available is %d\n", omp_get_max_threads()); # else printf("OpenMP disabled\n"); # endif } void getEnvironmentString(QuESTEnv env, char str[200]){ // OpenMP can be hybridised with GPU in future, so this check is safe and worthwhile int ompStatus=0; int numThreads=1; # ifdef _OPENMP ompStatus=1; numThreads=omp_get_max_threads(); # endif // there is no reporting of CUDA cores/threads/blocks currently (since non-trivial) sprintf(str, "CUDA=1 OpenMP=%d MPI=0 threads=%d ranks=1", ompStatus, numThreads); } void copyStateToGPU(Qureg qureg) { if (DEBUG) printf("Copying data to GPU\n"); cudaMemcpy(qureg.deviceStateVec.real, qureg.stateVec.real, qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.real)), cudaMemcpyHostToDevice); cudaMemcpy(qureg.deviceStateVec.imag, qureg.stateVec.imag, qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.imag)), cudaMemcpyHostToDevice); if (DEBUG) printf("Finished copying data to GPU\n"); } void copyStateFromGPU(Qureg qureg) { cudaDeviceSynchronize(); if (DEBUG) printf("Copying data from GPU\n"); cudaMemcpy(qureg.stateVec.real, qureg.deviceStateVec.real, qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.real)), cudaMemcpyDeviceToHost); cudaMemcpy(qureg.stateVec.imag, qureg.deviceStateVec.imag, qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.imag)), cudaMemcpyDeviceToHost); if (DEBUG) printf("Finished copying data from GPU\n"); } void statevec_copySubstateToGPU(Qureg qureg, long long int startInd, long long int numAmps) { if (DEBUG) printf("Copying data to GPU\n"); cudaMemcpy(&(qureg.deviceStateVec.real[startInd]), &(qureg.stateVec.real[startInd]), numAmps*sizeof(*(qureg.deviceStateVec.real)), cudaMemcpyHostToDevice); cudaMemcpy(&(qureg.deviceStateVec.imag[startInd]), &(qureg.stateVec.imag[startInd]), numAmps*sizeof(*(qureg.deviceStateVec.imag)), cudaMemcpyHostToDevice); if (DEBUG) printf("Finished copying data to GPU\n"); } void statevec_copySubstateFromGPU(Qureg qureg, long long int startInd, long long int numAmps) { cudaDeviceSynchronize(); if (DEBUG) printf("Copying data from GPU\n"); cudaMemcpy(&(qureg.stateVec.real[startInd]), &(qureg.deviceStateVec.real[startInd]), numAmps*sizeof(*(qureg.deviceStateVec.real)), cudaMemcpyDeviceToHost); cudaMemcpy(&(qureg.stateVec.imag[startInd]), &(qureg.deviceStateVec.imag[startInd]), numAmps*sizeof(*(qureg.deviceStateVec.imag)), cudaMemcpyDeviceToHost); if (DEBUG) printf("Finished copying data from GPU\n"); } /** Print the current state vector of probability amplitudes for a set of qubits to standard out. For debugging purposes. Each rank should print output serially. 
Only print output for systems <= 5 qubits */ void statevec_reportStateToScreen(Qureg qureg, QuESTEnv env, int reportRank){ long long int index; int rank; copyStateFromGPU(qureg); if (qureg.numQubitsInStateVec<=5){ for (rank=0; rank<qureg.numChunks; rank++){ if (qureg.chunkId==rank){ if (reportRank) { printf("Reporting state from rank %d [\n", qureg.chunkId); //printf("\trank, index, real, imag\n"); printf("real, imag\n"); } else if (rank==0) { printf("Reporting state [\n"); printf("real, imag\n"); } for(index=0; index<qureg.numAmpsPerChunk; index++){ printf(REAL_STRING_FORMAT ", " REAL_STRING_FORMAT "\n", qureg.stateVec.real[index], qureg.stateVec.imag[index]); } if (reportRank || rank==qureg.numChunks-1) printf("]\n"); } syncQuESTEnv(env); } } } qreal statevec_getRealAmp(Qureg qureg, long long int index){ qreal el=0; cudaMemcpy(&el, &(qureg.deviceStateVec.real[index]), sizeof(*(qureg.deviceStateVec.real)), cudaMemcpyDeviceToHost); return el; } qreal statevec_getImagAmp(Qureg qureg, long long int index){ qreal el=0; cudaMemcpy(&el, &(qureg.deviceStateVec.imag[index]), sizeof(*(qureg.deviceStateVec.imag)), cudaMemcpyDeviceToHost); return el; } __global__ void statevec_initBlankStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){ long long int index; // initialise the statevector to be all-zeros index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; stateVecReal[index] = 0.0; stateVecImag[index] = 0.0; } void statevec_initBlankState(Qureg qureg) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); statevec_initBlankStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>( qureg.numAmpsPerChunk, qureg.deviceStateVec.real, qureg.deviceStateVec.imag); } __global__ void statevec_initZeroStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){ long long int index; // initialise the state to |0000..0000> index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; stateVecReal[index] = 0.0; stateVecImag[index] = 0.0; if (index==0){ // zero state |0000..0000> has probability 1 stateVecReal[0] = 1.0; stateVecImag[0] = 0.0; } } void statevec_initZeroState(Qureg qureg) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); statevec_initZeroStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>( qureg.numAmpsPerChunk, qureg.deviceStateVec.real, qureg.deviceStateVec.imag); } __global__ void statevec_initPlusStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){ long long int index; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; qreal normFactor = 1.0/sqrt((qreal)stateVecSize); stateVecReal[index] = normFactor; stateVecImag[index] = 0.0; } void statevec_initPlusState(Qureg qureg) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); statevec_initPlusStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>( qureg.numAmpsPerChunk, qureg.deviceStateVec.real, qureg.deviceStateVec.imag); } __global__ void statevec_initClassicalStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag, long long int stateInd){ long long int index; // initialise the state to |stateInd> index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; stateVecReal[index] = 0.0; stateVecImag[index] = 0.0; if 
(index==stateInd){ // classical state has probability 1 stateVecReal[stateInd] = 1.0; stateVecImag[stateInd] = 0.0; } } void statevec_initClassicalState(Qureg qureg, long long int stateInd) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); statevec_initClassicalStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>( qureg.numAmpsPerChunk, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, stateInd); } __global__ void statevec_initDebugStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){ long long int index; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; stateVecReal[index] = (index*2.0)/10.0; stateVecImag[index] = (index*2.0+1.0)/10.0; } void statevec_initDebugState(Qureg qureg) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); statevec_initDebugStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>( qureg.numAmpsPerChunk, qureg.deviceStateVec.real, qureg.deviceStateVec.imag); } __global__ void statevec_initStateOfSingleQubitKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag, int qubitId, int outcome){ long long int index; int bit; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; qreal normFactor = 1.0/sqrt((qreal)stateVecSize/2); bit = extractBit(qubitId, index); if (bit==outcome) { stateVecReal[index] = normFactor; stateVecImag[index] = 0.0; } else { stateVecReal[index] = 0.0; stateVecImag[index] = 0.0; } } void statevec_initStateOfSingleQubit(Qureg *qureg, int qubitId, int outcome) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg->numAmpsPerChunk)/threadsPerCUDABlock); statevec_initStateOfSingleQubitKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg->numAmpsPerChunk, qureg->deviceStateVec.real, qureg->deviceStateVec.imag, qubitId, outcome); } // returns 1 if successful, else 0 int statevec_initStateFromSingleFile(Qureg *qureg, char filename[200], QuESTEnv env){ long long int chunkSize, stateVecSize; long long int indexInChunk, totalIndex; chunkSize = qureg->numAmpsPerChunk; stateVecSize = chunkSize*qureg->numChunks; qreal *stateVecReal = qureg->stateVec.real; qreal *stateVecImag = qureg->stateVec.imag; FILE *fp; char line[200]; fp = fopen(filename, "r"); if (fp == NULL) return 0; indexInChunk = 0; totalIndex = 0; while (fgets(line, sizeof(char)*200, fp) != NULL && totalIndex<stateVecSize){ if (line[0]!='#'){ int chunkId = totalIndex/chunkSize; if (chunkId==qureg->chunkId){ sscanf(line, REAL_SPECIFIER ", " REAL_SPECIFIER, &(stateVecReal[indexInChunk]), &(stateVecImag[indexInChunk])); indexInChunk += 1; } totalIndex += 1; } } fclose(fp); copyStateToGPU(*qureg); // indicate success return 1; } int statevec_compareStates(Qureg mq1, Qureg mq2, qreal precision){ qreal diff; int chunkSize = mq1.numAmpsPerChunk; copyStateFromGPU(mq1); copyStateFromGPU(mq2); for (int i=0; i<chunkSize; i++){ diff = mq1.stateVec.real[i] - mq2.stateVec.real[i]; if (diff<0) diff *= -1; if (diff>precision) return 0; diff = mq1.stateVec.imag[i] - mq2.stateVec.imag[i]; if (diff<0) diff *= -1; if (diff>precision) return 0; } return 1; } __global__ void statevec_compactUnitaryKernel (Qureg qureg, int rotQubit, Complex alpha, Complex beta){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block 
indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp,stateRealLo, // storage for previous state values stateImagUp,stateImagLo; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity long long int numTasks=qureg.numAmpsPerChunk>>1; sizeHalfBlock = 1LL << rotQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! fix -- no necessary for GPU version qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; qreal alphaImag=alpha.imag, alphaReal=alpha.real; qreal betaImag=beta.imag, betaReal=beta.real; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; // state[indexUp] = alpha * state[indexUp] - conj(beta) * state[indexLo] stateVecReal[indexUp] = alphaReal*stateRealUp - alphaImag*stateImagUp - betaReal*stateRealLo - betaImag*stateImagLo; stateVecImag[indexUp] = alphaReal*stateImagUp + alphaImag*stateRealUp - betaReal*stateImagLo + betaImag*stateRealLo; // state[indexLo] = beta * state[indexUp] + conj(alpha) * state[indexLo] stateVecReal[indexLo] = betaReal*stateRealUp - betaImag*stateImagUp + alphaReal*stateRealLo + alphaImag*stateImagLo; stateVecImag[indexLo] = betaReal*stateImagUp + betaImag*stateRealUp + alphaReal*stateImagLo - alphaImag*stateRealLo; } void statevec_compactUnitary(Qureg qureg, int targetQubit, Complex alpha, Complex beta) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); statevec_compactUnitaryKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit, alpha, beta); } __global__ void statevec_controlledCompactUnitaryKernel (Qureg qureg, int controlQubit, int targetQubit, Complex alpha, Complex beta){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp,stateRealLo, // storage for previous state values stateImagUp,stateImagLo; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity long long int numTasks=qureg.numAmpsPerChunk>>1; int controlBit; sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! 
fix -- no necessary for GPU version qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; qreal alphaImag=alpha.imag, alphaReal=alpha.real; qreal betaImag=beta.imag, betaReal=beta.real; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; controlBit = extractBit(controlQubit, indexUp); if (controlBit){ // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; // state[indexUp] = alpha * state[indexUp] - conj(beta) * state[indexLo] stateVecReal[indexUp] = alphaReal*stateRealUp - alphaImag*stateImagUp - betaReal*stateRealLo - betaImag*stateImagLo; stateVecImag[indexUp] = alphaReal*stateImagUp + alphaImag*stateRealUp - betaReal*stateImagLo + betaImag*stateRealLo; // state[indexLo] = beta * state[indexUp] + conj(alpha) * state[indexLo] stateVecReal[indexLo] = betaReal*stateRealUp - betaImag*stateImagUp + alphaReal*stateRealLo + alphaImag*stateImagLo; stateVecImag[indexLo] = betaReal*stateImagUp + betaImag*stateRealUp + alphaReal*stateImagLo - alphaImag*stateRealLo; } } void statevec_controlledCompactUnitary(Qureg qureg, int controlQubit, int targetQubit, Complex alpha, Complex beta) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); statevec_controlledCompactUnitaryKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, controlQubit, targetQubit, alpha, beta); } __global__ void statevec_unitaryKernel(Qureg qureg, int targetQubit, ArgMatrix2 u){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp,stateRealLo, // storage for previous state values stateImagUp,stateImagLo; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity long long int numTasks=qureg.numAmpsPerChunk>>1; sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! 
fix -- no necessary for GPU version qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; // state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo] stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp + u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo; stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp + u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo; // state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo] stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp + u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo; stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp + u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo; } void statevec_unitary(Qureg qureg, int targetQubit, ComplexMatrix2 u) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); statevec_unitaryKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit, argifyMatrix2(u)); } __global__ void statevec_multiControlledMultiQubitUnitaryKernel( Qureg qureg, long long int ctrlMask, int* targs, int numTargs, qreal* uRe, qreal* uIm, long long int* ampInds, qreal* reAmps, qreal* imAmps, long long int numTargAmps) { // decide the amplitudes this thread will modify long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x; long long int numTasks = qureg.numAmpsPerChunk >> numTargs; // kernel called on every 1 in 2^numTargs amplitudes if (thisTask>=numTasks) return; // find this task's start index (where all targs are 0) long long int ind00 = insertZeroBits(thisTask, targs, numTargs); // this task only modifies amplitudes if control qubits are 1 for this state if (ctrlMask && (ctrlMask&ind00) != ctrlMask) return; qreal *reVec = qureg.deviceStateVec.real; qreal *imVec = qureg.deviceStateVec.imag; /* each thread needs: long long int ampInds[numAmps]; qreal reAmps[numAmps]; qreal imAmps[numAmps]; but instead has access to shared arrays, with below stride and offset */ size_t stride = gridDim.x*blockDim.x; size_t offset = blockIdx.x*blockDim.x + threadIdx.x; // determine the indices and record values of target amps long long int ind; for (int i=0; i < numTargAmps; i++) { // get global index of current target qubit assignment ind = ind00; for (int t=0; t < numTargs; t++) if (extractBit(t, i)) ind = flipBit(ind, targs[t]); ampInds[i*stride+offset] = ind; reAmps [i*stride+offset] = reVec[ind]; imAmps [i*stride+offset] = imVec[ind]; } // update the amplitudes for (int r=0; r < numTargAmps; r++) { ind = ampInds[r*stride+offset]; reVec[ind] = 0; imVec[ind] = 0; for (int c=0; c < numTargAmps; c++) { qreal uReElem = uRe[c + r*numTargAmps]; qreal uImElem = uIm[c + r*numTargAmps]; reVec[ind] += reAmps[c*stride+offset]*uReElem - imAmps[c*stride+offset]*uImElem; imVec[ind] += reAmps[c*stride+offset]*uImElem + imAmps[c*stride+offset]*uReElem; } } } void statevec_multiControlledMultiQubitUnitary(Qureg qureg, long long int ctrlMask, int* targs, int numTargs, ComplexMatrixN u) { int threadsPerCUDABlock = 128; int CUDABlocks 
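// Sketch of the global-scratch layout used by the multi-target kernel above
// (hypothetical helper name). Thread `offset` keeps its i-th target amplitude
// at slot i*stride + offset, so for a fixed i consecutive threads touch
// consecutive addresses, keeping the accesses coalesced.
static __device__ size_t exampleScratchSlot(int i)
{
    size_t stride = gridDim.x * blockDim.x;                // total number of spawned threads
    size_t offset = blockIdx.x * blockDim.x + threadIdx.x; // this thread's global id
    return (size_t) i * stride + offset;
}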
= ceil((qreal)(qureg.numAmpsPerChunk>>numTargs)/threadsPerCUDABlock); // allocate device space for global {targs} (length: numTargs) and populate int *d_targs; size_t targMemSize = numTargs * sizeof *d_targs; cudaMalloc(&d_targs, targMemSize); cudaMemcpy(d_targs, targs, targMemSize, cudaMemcpyHostToDevice); // flatten out the u.real and u.imag lists int uNumRows = (1 << u.numQubits); qreal* uReFlat = (qreal*) malloc(uNumRows*uNumRows * sizeof *uReFlat); qreal* uImFlat = (qreal*) malloc(uNumRows*uNumRows * sizeof *uImFlat); long long int i = 0; for (int r=0; r < uNumRows; r++) for (int c=0; c < uNumRows; c++) { uReFlat[i] = u.real[r][c]; uImFlat[i] = u.imag[r][c]; i++; } // allocate device space for global u.real and u.imag (flatten by concatenating rows) and populate qreal* d_uRe; qreal* d_uIm; size_t uMemSize = uNumRows*uNumRows * sizeof *d_uRe; // size of each of d_uRe and d_uIm cudaMalloc(&d_uRe, uMemSize); cudaMalloc(&d_uIm, uMemSize); cudaMemcpy(d_uRe, uReFlat, uMemSize, cudaMemcpyHostToDevice); cudaMemcpy(d_uIm, uImFlat, uMemSize, cudaMemcpyHostToDevice); // allocate device Wspace for thread-local {ampInds}, {reAmps}, {imAmps} (length: 1<<numTargs) long long int *d_ampInds; qreal *d_reAmps; qreal *d_imAmps; size_t gridSize = (size_t) threadsPerCUDABlock * CUDABlocks; int numTargAmps = uNumRows; cudaMalloc(&d_ampInds, numTargAmps*gridSize * sizeof *d_ampInds); cudaMalloc(&d_reAmps, numTargAmps*gridSize * sizeof *d_reAmps); cudaMalloc(&d_imAmps, numTargAmps*gridSize * sizeof *d_imAmps); // call kernel statevec_multiControlledMultiQubitUnitaryKernel<<<CUDABlocks,threadsPerCUDABlock>>>( qureg, ctrlMask, d_targs, numTargs, d_uRe, d_uIm, d_ampInds, d_reAmps, d_imAmps, numTargAmps); // free kernel memory free(uReFlat); free(uImFlat); cudaFree(d_targs); cudaFree(d_uRe); cudaFree(d_uIm); cudaFree(d_ampInds); cudaFree(d_reAmps); cudaFree(d_imAmps); } __global__ void statevec_multiControlledTwoQubitUnitaryKernel(Qureg qureg, long long int ctrlMask, int q1, int q2, ArgMatrix4 u){ // decide the 4 amplitudes this thread will modify long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x; long long int numTasks = qureg.numAmpsPerChunk >> 2; // kernel called on every 1 in 4 amplitudes if (thisTask>=numTasks) return; qreal *reVec = qureg.deviceStateVec.real; qreal *imVec = qureg.deviceStateVec.imag; // find indices of amplitudes to modify (treat q1 as the least significant bit) long long int ind00, ind01, ind10, ind11; ind00 = insertTwoZeroBits(thisTask, q1, q2); // modify only if control qubits are 1 for this state if (ctrlMask && (ctrlMask&ind00) != ctrlMask) return; ind01 = flipBit(ind00, q1); ind10 = flipBit(ind00, q2); ind11 = flipBit(ind01, q2); // extract statevec amplitudes qreal re00, re01, re10, re11; qreal im00, im01, im10, im11; re00 = reVec[ind00]; im00 = imVec[ind00]; re01 = reVec[ind01]; im01 = imVec[ind01]; re10 = reVec[ind10]; im10 = imVec[ind10]; re11 = reVec[ind11]; im11 = imVec[ind11]; // apply u * {amp00, amp01, amp10, amp11} reVec[ind00] = u.r0c0.real*re00 - u.r0c0.imag*im00 + u.r0c1.real*re01 - u.r0c1.imag*im01 + u.r0c2.real*re10 - u.r0c2.imag*im10 + u.r0c3.real*re11 - u.r0c3.imag*im11; imVec[ind00] = u.r0c0.imag*re00 + u.r0c0.real*im00 + u.r0c1.imag*re01 + u.r0c1.real*im01 + u.r0c2.imag*re10 + u.r0c2.real*im10 + u.r0c3.imag*re11 + u.r0c3.real*im11; reVec[ind01] = u.r1c0.real*re00 - u.r1c0.imag*im00 + u.r1c1.real*re01 - u.r1c1.imag*im01 + u.r1c2.real*re10 - u.r1c2.imag*im10 + u.r1c3.real*re11 - u.r1c3.imag*im11; imVec[ind01] = u.r1c0.imag*re00 + u.r1c0.real*im00 + 
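// Sketch: the row-major convention shared by the host-side flattening above
// and the kernel's uRe[c + r*numTargAmps] indexing. Illustrative helper only.
static inline long long int exampleFlatMatrixIndex(int r, int c, int dim)
{
    return (long long int) r * dim + c;  // element (r,c) of a dim x dim matrix
}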
u.r1c1.imag*re01 + u.r1c1.real*im01 + u.r1c2.imag*re10 + u.r1c2.real*im10 + u.r1c3.imag*re11 + u.r1c3.real*im11; reVec[ind10] = u.r2c0.real*re00 - u.r2c0.imag*im00 + u.r2c1.real*re01 - u.r2c1.imag*im01 + u.r2c2.real*re10 - u.r2c2.imag*im10 + u.r2c3.real*re11 - u.r2c3.imag*im11; imVec[ind10] = u.r2c0.imag*re00 + u.r2c0.real*im00 + u.r2c1.imag*re01 + u.r2c1.real*im01 + u.r2c2.imag*re10 + u.r2c2.real*im10 + u.r2c3.imag*re11 + u.r2c3.real*im11; reVec[ind11] = u.r3c0.real*re00 - u.r3c0.imag*im00 + u.r3c1.real*re01 - u.r3c1.imag*im01 + u.r3c2.real*re10 - u.r3c2.imag*im10 + u.r3c3.real*re11 - u.r3c3.imag*im11; imVec[ind11] = u.r3c0.imag*re00 + u.r3c0.real*im00 + u.r3c1.imag*re01 + u.r3c1.real*im01 + u.r3c2.imag*re10 + u.r3c2.real*im10 + u.r3c3.imag*re11 + u.r3c3.real*im11; } void statevec_multiControlledTwoQubitUnitary(Qureg qureg, long long int ctrlMask, int q1, int q2, ComplexMatrix4 u) { int threadsPerCUDABlock = 128; int CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>2)/threadsPerCUDABlock); // one kernel eval for every 4 amplitudes statevec_multiControlledTwoQubitUnitaryKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, ctrlMask, q1, q2, argifyMatrix4(u)); } __global__ void statevec_controlledUnitaryKernel(Qureg qureg, int controlQubit, int targetQubit, ArgMatrix2 u){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp,stateRealLo, // storage for previous state values stateImagUp,stateImagLo; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity long long int numTasks=qureg.numAmpsPerChunk>>1; int controlBit; sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! 
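// Sketch (illustrative, not part of QuEST): how the two-qubit kernel above
// derives the four amplitude indices one task acts on, using the same bit
// helpers it calls.
static __device__ void exampleTwoQubitIndices(
    long long int thisTask, int q1, int q2, long long int inds[4])
{
    inds[0] = insertTwoZeroBits(thisTask, q1, q2); // q2 = 0, q1 = 0
    inds[1] = flipBit(inds[0], q1);                // q2 = 0, q1 = 1
    inds[2] = flipBit(inds[0], q2);                // q2 = 1, q1 = 0
    inds[3] = flipBit(inds[1], q2);                // q2 = 1, q1 = 1
}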
fix -- no necessary for GPU version qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; controlBit = extractBit(controlQubit, indexUp); if (controlBit){ // state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo] stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp + u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo; stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp + u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo; // state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo] stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp + u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo; stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp + u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo; } } void statevec_controlledUnitary(Qureg qureg, int controlQubit, int targetQubit, ComplexMatrix2 u) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); statevec_controlledUnitaryKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, controlQubit, targetQubit, argifyMatrix2(u)); } __global__ void statevec_multiControlledUnitaryKernel( Qureg qureg, long long int ctrlQubitsMask, long long int ctrlFlipMask, int targetQubit, ArgMatrix2 u ){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp,stateRealLo, // storage for previous state values stateImagUp,stateImagLo; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity long long int numTasks=qureg.numAmpsPerChunk>>1; sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! 
fix -- no necessary for GPU version qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; if (ctrlQubitsMask == (ctrlQubitsMask & (indexUp ^ ctrlFlipMask))) { // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; // state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo] stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp + u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo; stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp + u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo; // state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo] stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp + u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo; stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp + u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo; } } void statevec_multiControlledUnitary( Qureg qureg, long long int ctrlQubitsMask, long long int ctrlFlipMask, int targetQubit, ComplexMatrix2 u ){ int threadsPerCUDABlock = 128; int CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); statevec_multiControlledUnitaryKernel<<<CUDABlocks, threadsPerCUDABlock>>>( qureg, ctrlQubitsMask, ctrlFlipMask, targetQubit, argifyMatrix2(u)); } __global__ void statevec_pauliXKernel(Qureg qureg, int targetQubit){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp, // storage for previous state values stateImagUp; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity long long int numTasks=qureg.numAmpsPerChunk>>1; sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! 
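// Sketch of the control test used above. Assumption (flagged, not verified
// here): ctrlFlipMask marks control qubits conditioned on |0> rather than |1>,
// so XOR-ing it first lets a single AND check every control at once. The
// helper name is hypothetical.
static __host__ __device__ int exampleControlsSatisfied(
    long long int index, long long int ctrlQubitsMask, long long int ctrlFlipMask)
{
    long long int effective = index ^ ctrlFlipMask;        // invert the zero-conditioned controls
    return ctrlQubitsMask == (ctrlQubitsMask & effective); // all control bits now set?
}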
fix -- no necessary for GPU version qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateVecReal[indexUp] = stateVecReal[indexLo]; stateVecImag[indexUp] = stateVecImag[indexLo]; stateVecReal[indexLo] = stateRealUp; stateVecImag[indexLo] = stateImagUp; } void statevec_pauliX(Qureg qureg, int targetQubit) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); statevec_pauliXKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit); } __global__ void statevec_pauliYKernel(Qureg qureg, int targetQubit, int conjFac){ long long int sizeHalfBlock = 1LL << targetQubit; long long int sizeBlock = 2LL * sizeHalfBlock; long long int numTasks = qureg.numAmpsPerChunk >> 1; long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; long long int thisBlock = thisTask / sizeHalfBlock; long long int indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; long long int indexLo = indexUp + sizeHalfBlock; qreal stateRealUp, stateImagUp; qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; // update under +-{{0, -i}, {i, 0}} stateVecReal[indexUp] = conjFac * stateVecImag[indexLo]; stateVecImag[indexUp] = conjFac * -stateVecReal[indexLo]; stateVecReal[indexLo] = conjFac * -stateImagUp; stateVecImag[indexLo] = conjFac * stateRealUp; } void statevec_pauliY(Qureg qureg, int targetQubit) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); statevec_pauliYKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit, 1); } void statevec_pauliYConj(Qureg qureg, int targetQubit) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); statevec_pauliYKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit, -1); } __global__ void statevec_controlledPauliYKernel(Qureg qureg, int controlQubit, int targetQubit, int conjFac) { long long int index; long long int sizeBlock, sizeHalfBlock; long long int stateVecSize; int controlBit; qreal stateRealUp, stateImagUp; long long int thisBlock, indexUp, indexLo; sizeHalfBlock = 1LL << targetQubit; sizeBlock = 2LL * sizeHalfBlock; stateVecSize = qureg.numAmpsPerChunk; qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=(stateVecSize>>1)) return; thisBlock = index / sizeHalfBlock; indexUp = thisBlock*sizeBlock + index%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; controlBit = extractBit(controlQubit, indexUp); if (controlBit){ stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; // update under +-{{0, -i}, {i, 0}} stateVecReal[indexUp] = conjFac * stateVecImag[indexLo]; stateVecImag[indexUp] = conjFac * -stateVecReal[indexLo]; stateVecReal[indexLo] = conjFac * -stateImagUp; stateVecImag[indexLo] = conjFac * stateRealUp; } } void statevec_controlledPauliY(Qureg 
qureg, int controlQubit, int targetQubit) { int conjFactor = 1; int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); statevec_controlledPauliYKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, controlQubit, targetQubit, conjFactor); } void statevec_controlledPauliYConj(Qureg qureg, int controlQubit, int targetQubit) { int conjFactor = -1; int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); statevec_controlledPauliYKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, controlQubit, targetQubit, conjFactor); } __global__ void statevec_phaseShiftByTermKernel(Qureg qureg, int targetQubit, qreal cosAngle, qreal sinAngle) { long long int sizeBlock, sizeHalfBlock; long long int thisBlock, indexUp,indexLo; qreal stateRealLo, stateImagLo; long long int thisTask; long long int numTasks = qureg.numAmpsPerChunk >> 1; sizeHalfBlock = 1LL << targetQubit; sizeBlock = 2LL * sizeHalfBlock; qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; stateVecReal[indexLo] = cosAngle*stateRealLo - sinAngle*stateImagLo; stateVecImag[indexLo] = sinAngle*stateRealLo + cosAngle*stateImagLo; } void statevec_phaseShiftByTerm(Qureg qureg, int targetQubit, Complex term) { qreal cosAngle = term.real; qreal sinAngle = term.imag; int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); statevec_phaseShiftByTermKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit, cosAngle, sinAngle); } __global__ void statevec_controlledPhaseShiftKernel(Qureg qureg, int idQubit1, int idQubit2, qreal cosAngle, qreal sinAngle) { long long int index; long long int stateVecSize; int bit1, bit2; qreal stateRealLo, stateImagLo; stateVecSize = qureg.numAmpsPerChunk; qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; bit1 = extractBit (idQubit1, index); bit2 = extractBit (idQubit2, index); if (bit1 && bit2) { stateRealLo = stateVecReal[index]; stateImagLo = stateVecImag[index]; stateVecReal[index] = cosAngle*stateRealLo - sinAngle*stateImagLo; stateVecImag[index] = sinAngle*stateRealLo + cosAngle*stateImagLo; } } void statevec_controlledPhaseShift(Qureg qureg, int idQubit1, int idQubit2, qreal angle) { qreal cosAngle = cos(angle); qreal sinAngle = sin(angle); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); statevec_controlledPhaseShiftKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, idQubit1, idQubit2, cosAngle, sinAngle); } __global__ void statevec_multiControlledPhaseShiftKernel(Qureg qureg, long long int mask, qreal cosAngle, qreal sinAngle) { qreal stateRealLo, stateImagLo; long long int index; long long int stateVecSize; stateVecSize = qureg.numAmpsPerChunk; qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; if (mask == (mask & index) ){ 
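// Sketch: the in-place complex multiply by (cosAngle + i*sinAngle) that the
// phase-shift kernels in this region apply to selected amplitudes.
// Illustrative helper, not part of the QuEST API.
static __host__ __device__ void examplePhaseRotate(
    qreal cosAngle, qreal sinAngle, qreal* re, qreal* im)
{
    qreal r = *re, i = *im;
    *re = cosAngle*r - sinAngle*i;
    *im = sinAngle*r + cosAngle*i;
}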
stateRealLo = stateVecReal[index]; stateImagLo = stateVecImag[index]; stateVecReal[index] = cosAngle*stateRealLo - sinAngle*stateImagLo; stateVecImag[index] = sinAngle*stateRealLo + cosAngle*stateImagLo; } } void statevec_multiControlledPhaseShift(Qureg qureg, int *controlQubits, int numControlQubits, qreal angle) { qreal cosAngle = cos(angle); qreal sinAngle = sin(angle); long long int mask = getQubitBitMask(controlQubits, numControlQubits); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); statevec_multiControlledPhaseShiftKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, mask, cosAngle, sinAngle); } __global__ void statevec_multiRotateZKernel(Qureg qureg, long long int mask, qreal cosAngle, qreal sinAngle) { long long int stateVecSize = qureg.numAmpsPerChunk; long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; int fac = getBitMaskParity(mask & index)? -1 : 1; qreal stateReal = stateVecReal[index]; qreal stateImag = stateVecImag[index]; stateVecReal[index] = cosAngle*stateReal + fac * sinAngle*stateImag; stateVecImag[index] = - fac * sinAngle*stateReal + cosAngle*stateImag; } void statevec_multiRotateZ(Qureg qureg, long long int mask, qreal angle) { qreal cosAngle = cos(angle/2.0); qreal sinAngle = sin(angle/2.0); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); statevec_multiRotateZKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, mask, cosAngle, sinAngle); } __global__ void statevec_multiControlledMultiRotateZKernel(Qureg qureg, long long int ctrlMask, long long int targMask, qreal cosAngle, qreal sinAngle) { long long int stateVecSize = qureg.numAmpsPerChunk; long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; // amplitudes corresponding to control qubits not all-in-one are unmodified if (ctrlMask && ((ctrlMask & index) != ctrlMask)) return; qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; // avoid warp divergence, setting fac = +- 1 int fac = 1-2*getBitMaskParity(targMask & index); qreal stateReal = stateVecReal[index]; qreal stateImag = stateVecImag[index]; stateVecReal[index] = cosAngle*stateReal + fac * sinAngle*stateImag; stateVecImag[index] = - fac * sinAngle*stateReal + cosAngle*stateImag; } void statevec_multiControlledMultiRotateZ(Qureg qureg, long long int ctrlMask, long long int targMask, qreal angle) { qreal cosAngle = cos(angle/2.0); qreal sinAngle = sin(angle/2.0); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); statevec_multiControlledMultiRotateZKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, ctrlMask, targMask, cosAngle, sinAngle); } qreal densmatr_calcTotalProb(Qureg qureg) { // computes the trace using Kahan summation qreal pTotal=0; qreal y, t, c; c = 0; long long int numCols = 1LL << qureg.numQubitsRepresented; long long diagIndex; copyStateFromGPU(qureg); for (int col=0; col< numCols; col++) { diagIndex = col*(numCols + 1); y = qureg.stateVec.real[diagIndex] - c; t = pTotal + y; c = ( t - pTotal ) - y; // brackets are important pTotal = t; } return pTotal; } qreal statevec_calcTotalProb(Qureg qureg){ /* IJB - implemented using Kahan summation for greater accuracy at a slight 
floating point operation overhead. For more details see https://en.wikipedia.org/wiki/Kahan_summation_algorithm */ /* Don't change the bracketing in this routine! */ qreal pTotal=0; qreal y, t, c; long long int index; long long int numAmpsPerRank = qureg.numAmpsPerChunk; copyStateFromGPU(qureg); c = 0.0; for (index=0; index<numAmpsPerRank; index++){ /* Perform pTotal+=qureg.stateVec.real[index]*qureg.stateVec.real[index]; by Kahan */ // pTotal+=qureg.stateVec.real[index]*qureg.stateVec.real[index]; y = qureg.stateVec.real[index]*qureg.stateVec.real[index] - c; t = pTotal + y; c = ( t - pTotal ) - y; pTotal = t; /* Perform pTotal+=qureg.stateVec.imag[index]*qureg.stateVec.imag[index]; by Kahan */ //pTotal+=qureg.stateVec.imag[index]*qureg.stateVec.imag[index]; y = qureg.stateVec.imag[index]*qureg.stateVec.imag[index] - c; t = pTotal + y; c = ( t - pTotal ) - y; pTotal = t; } return pTotal; } __global__ void statevec_controlledPhaseFlipKernel(Qureg qureg, int idQubit1, int idQubit2) { long long int index; long long int stateVecSize; int bit1, bit2; stateVecSize = qureg.numAmpsPerChunk; qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; bit1 = extractBit (idQubit1, index); bit2 = extractBit (idQubit2, index); if (bit1 && bit2) { stateVecReal [index] = - stateVecReal [index]; stateVecImag [index] = - stateVecImag [index]; } } void statevec_controlledPhaseFlip(Qureg qureg, int idQubit1, int idQubit2) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); statevec_controlledPhaseFlipKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, idQubit1, idQubit2); } __global__ void statevec_multiControlledPhaseFlipKernel(Qureg qureg, long long int mask) { long long int index; long long int stateVecSize; stateVecSize = qureg.numAmpsPerChunk; qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; if (mask == (mask & index) ){ stateVecReal [index] = - stateVecReal [index]; stateVecImag [index] = - stateVecImag [index]; } } void statevec_multiControlledPhaseFlip(Qureg qureg, int *controlQubits, int numControlQubits) { int threadsPerCUDABlock, CUDABlocks; long long int mask = getQubitBitMask(controlQubits, numControlQubits); threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); statevec_multiControlledPhaseFlipKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, mask); } __global__ void statevec_swapQubitAmpsKernel(Qureg qureg, int qb1, int qb2) { qreal *reVec = qureg.deviceStateVec.real; qreal *imVec = qureg.deviceStateVec.imag; long long int numTasks = qureg.numAmpsPerChunk >> 2; // each iteration updates 2 amps and skips 2 amps long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; long long int ind00, ind01, ind10; qreal re01, re10, im01, im10; // determine ind00 of |..0..0..>, |..0..1..> and |..1..0..> ind00 = insertTwoZeroBits(thisTask, qb1, qb2); ind01 = flipBit(ind00, qb1); ind10 = flipBit(ind00, qb2); // extract statevec amplitudes re01 = reVec[ind01]; im01 = imVec[ind01]; re10 = reVec[ind10]; im10 = imVec[ind10]; // swap 01 and 10 amps reVec[ind01] = re10; reVec[ind10] = re01; imVec[ind01] = im10; imVec[ind10] = im01; } void statevec_swapQubitAmps(Qureg qureg, int qb1, int qb2) { int 
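// Sketch: the Kahan compensated summation used by the calcTotalProb routines
// above, pulled out as a standalone host helper (hypothetical name) so the
// bracketing that recovers the rounding error is easy to see.
static qreal exampleKahanSum(const qreal* vals, long long int n)
{
    qreal total = 0, c = 0;           // c carries the low-order bits lost by each add
    for (long long int k = 0; k < n; k++) {
        qreal y = vals[k] - c;
        qreal t = total + y;
        c = (t - total) - y;          // don't change the bracketing
        total = t;
    }
    return total;
}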
threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>2)/threadsPerCUDABlock); statevec_swapQubitAmpsKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, qb1, qb2); } __global__ void statevec_hadamardKernel (Qureg qureg, int targetQubit){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp,stateRealLo, // storage for previous state values stateImagUp,stateImagLo; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity long long int numTasks=qureg.numAmpsPerChunk>>1; sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! fix -- no necessary for GPU version qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; qreal recRoot2 = 1.0/sqrt(2.0); thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; stateVecReal[indexUp] = recRoot2*(stateRealUp + stateRealLo); stateVecImag[indexUp] = recRoot2*(stateImagUp + stateImagLo); stateVecReal[indexLo] = recRoot2*(stateRealUp - stateRealLo); stateVecImag[indexLo] = recRoot2*(stateImagUp - stateImagLo); } void statevec_hadamard(Qureg qureg, int targetQubit) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); statevec_hadamardKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit); } __global__ void statevec_controlledNotKernel(Qureg qureg, int controlQubit, int targetQubit) { long long int index; long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved long long int stateVecSize; int controlBit; // ----- temp variables qreal stateRealUp, // storage for previous state values stateImagUp; // (used in updates) long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks stateVecSize = qureg.numAmpsPerChunk; qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=(stateVecSize>>1)) return; thisBlock = index / sizeHalfBlock; indexUp = thisBlock*sizeBlock + index%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; controlBit = extractBit(controlQubit, indexUp); if (controlBit){ stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateVecReal[indexUp] = stateVecReal[indexLo]; stateVecImag[indexUp] = stateVecImag[indexLo]; stateVecReal[indexLo] = stateRealUp; stateVecImag[indexLo] = stateImagUp; } } void statevec_controlledNot(Qureg qureg, int controlQubit, int targetQubit) { int threadsPerCUDABlock, 
CUDABlocks;
    threadsPerCUDABlock = 128;
    CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
    statevec_controlledNotKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, controlQubit, targetQubit);
}

__global__ void statevec_multiControlledMultiQubitNotKernel(Qureg qureg, int ctrlMask, int targMask) {

    qreal* stateRe = qureg.deviceStateVec.real;
    qreal* stateIm = qureg.deviceStateVec.imag;

    // although each thread swaps/updates two amplitudes, we still invoke one thread per amp
    long long int ampInd = blockIdx.x*blockDim.x + threadIdx.x;
    if (ampInd >= qureg.numAmpsPerChunk)
        return;

    // modify amplitudes only if control qubits are 1 for this state
    if (ctrlMask && ((ctrlMask & ampInd) != ctrlMask))
        return;

    long long int mateInd = ampInd ^ targMask;

    // if the mate is lower index, another thread is handling it
    if (mateInd < ampInd)
        return;

    /* it may seem wasteful to spawn more threads than are needed, and abort
     * half of them due to the amp pairing above (and potentially abort
     * an exponential number due to ctrlMask). however, since we are moving
     * global memory directly in a potentially non-contiguous fashion, this
     * method is likely to be memory bandwidth bottlenecked anyway
     */

    qreal mateRe = stateRe[mateInd];
    qreal mateIm = stateIm[mateInd];

    // swap amp with mate
    stateRe[mateInd] = stateRe[ampInd];
    stateIm[mateInd] = stateIm[ampInd];
    stateRe[ampInd] = mateRe;
    stateIm[ampInd] = mateIm;
}

void statevec_multiControlledMultiQubitNot(Qureg qureg, int ctrlMask, int targMask) {

    int numThreadsPerBlock = 128;
    int numBlocks = ceil(qureg.numAmpsPerChunk / (qreal) numThreadsPerBlock);
    statevec_multiControlledMultiQubitNotKernel<<<numBlocks, numThreadsPerBlock>>>(qureg, ctrlMask, targMask);
}

__device__ __host__ unsigned int log2Int( unsigned int x )
{
    unsigned int ans = 0 ;
    while( x>>=1 ) ans++;
    return ans ;
}

__device__ void reduceBlock(qreal *arrayIn, qreal *reducedArray, int length){
    int i, l, r;
    int threadMax, maxDepth;
    threadMax = length/2;
    maxDepth = log2Int(length/2);

    for (i=0; i<maxDepth+1; i++){
        if (threadIdx.x<threadMax){
            l = threadIdx.x;
            r = l + threadMax;
            arrayIn[l] = arrayIn[r] + arrayIn[l];
        }
        threadMax = threadMax >> 1;
        __syncthreads(); // optimise -- use warp shuffle instead
    }

    if (threadIdx.x==0) reducedArray[blockIdx.x] = arrayIn[0];
}

__global__ void copySharedReduceBlock(qreal*arrayIn, qreal *reducedArray, int length){
    extern __shared__ qreal tempReductionArray[];
    int blockOffset = blockIdx.x*length;
    tempReductionArray[threadIdx.x*2] = arrayIn[blockOffset + threadIdx.x*2];
    tempReductionArray[threadIdx.x*2+1] = arrayIn[blockOffset + threadIdx.x*2+1];
    __syncthreads();
    reduceBlock(tempReductionArray, reducedArray, length);
}

__global__ void densmatr_findProbabilityOfZeroKernel(
    Qureg qureg, int measureQubit, qreal *reducedArray
) {
    // run by each thread
    // use of block here refers to contiguous amplitudes where measureQubit = 0,
    // (then =1) and NOT the CUDA block, which is the partitioning of CUDA threads

    long long int densityDim    = 1LL << qureg.numQubitsRepresented;
    long long int numTasks      = densityDim >> 1;
    long long int sizeHalfBlock = 1LL << (measureQubit);
    long long int sizeBlock     = 2LL * sizeHalfBlock;

    long long int thisBlock;    // which block this thread is processing
    long long int thisTask;     // which part of the block this thread is processing
    long long int basisIndex;   // index of this thread's computational basis state
    long long int densityIndex; // " " index of |basis><basis| in the flat density matrix

    // array of each thread's collected probability, to be summed
    extern
__shared__ qreal tempReductionArray[]; // figure out which density matrix prob that this thread is assigned thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; basisIndex = thisBlock*sizeBlock + thisTask%sizeHalfBlock; densityIndex = (densityDim + 1) * basisIndex; // record the probability in the CUDA-BLOCK-wide array qreal prob = qureg.deviceStateVec.real[densityIndex]; // im[densityIndex] assumed ~ 0 tempReductionArray[threadIdx.x] = prob; // sum the probs collected by this CUDA-BLOCK's threads into a per-CUDA-BLOCK array __syncthreads(); if (threadIdx.x<blockDim.x/2){ reduceBlock(tempReductionArray, reducedArray, blockDim.x); } } __global__ void statevec_findProbabilityOfZeroKernel( Qureg qureg, int measureQubit, qreal *reducedArray ) { // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block index; // current index for first half block // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity long long int numTasks=qureg.numAmpsPerChunk>>1; // (good for shared memory parallelism) extern __shared__ qreal tempReductionArray[]; // ---------------------------------------------------------------- // // dimensions // // ---------------------------------------------------------------- // sizeHalfBlock = 1LL << (measureQubit); // number of state vector elements to sum, // and then the number to skip sizeBlock = 2LL * sizeHalfBlock; // size of blocks (pairs of measure and skip entries) // ---------------------------------------------------------------- // // find probability // // ---------------------------------------------------------------- // // // --- task-based shared-memory parallel implementation // qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; index = thisBlock*sizeBlock + thisTask%sizeHalfBlock; qreal realVal, imagVal; realVal = stateVecReal[index]; imagVal = stateVecImag[index]; tempReductionArray[threadIdx.x] = realVal*realVal + imagVal*imagVal; __syncthreads(); if (threadIdx.x<blockDim.x/2){ reduceBlock(tempReductionArray, reducedArray, blockDim.x); } } int getNumReductionLevels(long long int numValuesToReduce, int numReducedPerLevel){ int levels=0; while (numValuesToReduce){ numValuesToReduce = numValuesToReduce/numReducedPerLevel; levels++; } return levels; } void swapDouble(qreal **a, qreal **b){ qreal *temp; temp = *a; *a = *b; *b = temp; } qreal densmatr_findProbabilityOfZero(Qureg qureg, int measureQubit) { long long int densityDim = 1LL << qureg.numQubitsRepresented; long long int numValuesToReduce = densityDim >> 1; // half of the diagonal has measureQubit=0 int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int maxReducedPerLevel = REDUCE_SHARED_SIZE; int firstTime = 1; while (numValuesToReduce > 1) { // need less than one CUDA-BLOCK to reduce if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } // otherwise use only full CUDA-BLOCKS else { valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } sharedMemSize = valuesPerCUDABlock*sizeof(qreal); // spawn threads to sum the probs in each block if (firstTime) { 
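// The "use warp shuffle instead" note in reduceBlock above points at a further
// optimisation; this is a minimal sketch of that idea (assuming qreal is float
// or double, a full 32-thread warp, and CUDA 9+ shuffle intrinsics). It is not
// wired into the reductions here.
static __device__ qreal exampleWarpReduceSum(qreal val)
{
    for (int delta = warpSize/2; delta > 0; delta /= 2)
        val += __shfl_down_sync(0xffffffff, val, delta); // add the value held `delta` lanes above
    return val; // lane 0 ends up holding the warp's total
}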
densmatr_findProbabilityOfZeroKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>( qureg, measureQubit, qureg.firstLevelReduction); firstTime = 0; // sum the block probs } else { cudaDeviceSynchronize(); copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>( qureg.firstLevelReduction, qureg.secondLevelReduction, valuesPerCUDABlock); cudaDeviceSynchronize(); swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } qreal zeroProb; cudaMemcpy(&zeroProb, qureg.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost); return zeroProb; } qreal statevec_findProbabilityOfZero(Qureg qureg, int measureQubit) { qreal stateProb=0; // 1-qubit edge-case breaks below loop logic if (qureg.numQubitsInStateVec == 1) { qreal amp; cudaMemcpy(&amp, qureg.deviceStateVec.real, sizeof(qreal), cudaMemcpyDeviceToHost); stateProb += amp*amp; cudaMemcpy(&amp, qureg.deviceStateVec.imag, sizeof(qreal), cudaMemcpyDeviceToHost); stateProb += amp*amp; return stateProb; } long long int numValuesToReduce = qureg.numAmpsPerChunk>>1; int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int firstTime=1; int maxReducedPerLevel = REDUCE_SHARED_SIZE; while(numValuesToReduce>1){ if (numValuesToReduce<maxReducedPerLevel){ // Need less than one CUDA block to reduce values valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } else { // Use full CUDA blocks, with block size constrained by shared mem usage valuesPerCUDABlock = maxReducedPerLevel; numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } sharedMemSize = valuesPerCUDABlock*sizeof(qreal); if (firstTime){ statevec_findProbabilityOfZeroKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>( qureg, measureQubit, qureg.firstLevelReduction); firstTime=0; } else { cudaDeviceSynchronize(); copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>( qureg.firstLevelReduction, qureg.secondLevelReduction, valuesPerCUDABlock); cudaDeviceSynchronize(); swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } cudaMemcpy(&stateProb, qureg.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost); return stateProb; } qreal statevec_calcProbOfOutcome(Qureg qureg, int measureQubit, int outcome) { qreal outcomeProb = statevec_findProbabilityOfZero(qureg, measureQubit); if (outcome==1) outcomeProb = 1.0 - outcomeProb; return outcomeProb; } qreal densmatr_calcProbOfOutcome(Qureg qureg, int measureQubit, int outcome) { qreal outcomeProb = densmatr_findProbabilityOfZero(qureg, measureQubit); if (outcome==1) outcomeProb = 1.0 - outcomeProb; return outcomeProb; } // atomicAdd on floats/doubles isn't available on <6 CC devices, so we add it ourselves #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600 #else static __inline__ __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*) address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return __longlong_as_double(old); } #endif __global__ void statevec_calcProbOfAllOutcomesKernel( qreal* outcomeProbs, Qureg qureg, int* qubits, int numQubits ) { // each thread handles one amplitude (all amplitudes are involved) 
long long int ampInd = blockIdx.x*blockDim.x + threadIdx.x;
    if (ampInd >= qureg.numAmpsTotal) return;

    qreal prob = (
        qureg.deviceStateVec.real[ampInd]*qureg.deviceStateVec.real[ampInd]
        + qureg.deviceStateVec.imag[ampInd]*qureg.deviceStateVec.imag[ampInd]);

    // each amplitude contributes to one outcome
    long long int outcomeInd = 0;
    for (int q=0; q<numQubits; q++)
        outcomeInd += extractBit(qubits[q], ampInd) * (1LL << q);

    // each thread atomically writes directly to the global output.
    // this beat block-hierarchical atomic reductions in both global and shared memory!
    atomicAdd(&outcomeProbs[outcomeInd], prob);
}

void statevec_calcProbOfAllOutcomes(qreal* outcomeProbs, Qureg qureg, int* qubits, int numQubits) {

    // copy qubits to GPU memory
    int* d_qubits;
    size_t mem_qubits = numQubits * sizeof *d_qubits;
    cudaMalloc(&d_qubits, mem_qubits);
    cudaMemcpy(d_qubits, qubits, mem_qubits, cudaMemcpyHostToDevice);

    // create one thread for every amplitude
    int numThreadsPerBlock = 128;
    int numBlocks = ceil(qureg.numAmpsPerChunk / (qreal) numThreadsPerBlock);

    // create global GPU array for outcomeProbs
    qreal* d_outcomeProbs;
    long long int numOutcomes = (1LL << numQubits);
    size_t mem_outcomeProbs = numOutcomes * sizeof *d_outcomeProbs;
    cudaMalloc(&d_outcomeProbs, mem_outcomeProbs);
    cudaMemset(d_outcomeProbs, 0, mem_outcomeProbs);

    // populate per-block subarrays
    statevec_calcProbOfAllOutcomesKernel<<<numBlocks, numThreadsPerBlock>>>(
        d_outcomeProbs, qureg, d_qubits, numQubits);

    // copy outcomeProbs from GPU memory
    cudaMemcpy(outcomeProbs, d_outcomeProbs, mem_outcomeProbs, cudaMemcpyDeviceToHost);

    // free GPU memory
    cudaFree(d_qubits);
    cudaFree(d_outcomeProbs);
}

__global__ void densmatr_calcProbOfAllOutcomesKernel(
    qreal* outcomeProbs, Qureg qureg, int* qubits, int numQubits
) {
    // each thread handles one diagonal amplitude
    long long int diagInd = blockIdx.x*blockDim.x + threadIdx.x;
    long long int numDiags = (1LL << qureg.numQubitsRepresented);
    if (diagInd >= numDiags) return;

    long long int flatInd = (1 + numDiags)*diagInd;
    qreal prob = qureg.deviceStateVec.real[flatInd];   // im[flatInd] assumed ~ 0

    // each diagonal amplitude contributes to one outcome
    long long int outcomeInd = 0;
    for (int q=0; q<numQubits; q++)
        outcomeInd += extractBit(qubits[q], diagInd) * (1LL << q);

    // each thread atomically writes directly to the global output.
    // this beat block-hierarchical atomic reductions in both global and shared memory!
atomicAdd(&outcomeProbs[outcomeInd], prob); } void densmatr_calcProbOfAllOutcomes(qreal* outcomeProbs, Qureg qureg, int* qubits, int numQubits) { // copy qubits to GPU memory int* d_qubits; size_t mem_qubits = numQubits * sizeof *d_qubits; cudaMalloc(&d_qubits, mem_qubits); cudaMemcpy(d_qubits, qubits, mem_qubits, cudaMemcpyHostToDevice); // create global array, with per-block subarrays int numThreadsPerBlock = 128; int numDiags = (1LL << qureg.numQubitsRepresented); int numBlocks = ceil(numDiags / (qreal) numThreadsPerBlock); // create global GPU array for outcomeProbs qreal* d_outcomeProbs; long long int numOutcomes = (1LL << numQubits); size_t mem_outcomeProbs = numOutcomes * sizeof *d_outcomeProbs; cudaMalloc(&d_outcomeProbs, mem_outcomeProbs); cudaMemset(d_outcomeProbs, 0, mem_outcomeProbs); // populate per-block subarrays densmatr_calcProbOfAllOutcomesKernel<<<numBlocks, numThreadsPerBlock>>>( d_outcomeProbs, qureg, d_qubits, numQubits); // copy outcomeProbs from GPU memory cudaMemcpy(outcomeProbs, d_outcomeProbs, mem_outcomeProbs, cudaMemcpyDeviceToHost); // free GPU memory cudaFree(d_qubits); cudaFree(d_outcomeProbs); } /** computes Tr(conjTrans(a) b) = sum of (a_ij^* b_ij), which is a real number */ __global__ void densmatr_calcInnerProductKernel( Qureg a, Qureg b, long long int numTermsToSum, qreal* reducedArray ) { long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index >= numTermsToSum) return; // Re{ conj(a) b } = Re{ (aRe - i aIm)(bRe + i bIm) } = aRe bRe + aIm bIm qreal prod = ( a.deviceStateVec.real[index]*b.deviceStateVec.real[index] + a.deviceStateVec.imag[index]*b.deviceStateVec.imag[index]); // array of each thread's collected sum term, to be summed extern __shared__ qreal tempReductionArray[]; tempReductionArray[threadIdx.x] = prod; __syncthreads(); // every second thread reduces if (threadIdx.x<blockDim.x/2) reduceBlock(tempReductionArray, reducedArray, blockDim.x); } qreal densmatr_calcInnerProduct(Qureg a, Qureg b) { // we're summing the square of every term in the density matrix long long int numValuesToReduce = a.numAmpsTotal; int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int maxReducedPerLevel = REDUCE_SHARED_SIZE; int firstTime = 1; while (numValuesToReduce > 1) { // need less than one CUDA-BLOCK to reduce if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } // otherwise use only full CUDA-BLOCKS else { valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } // dictates size of reduction array sharedMemSize = valuesPerCUDABlock*sizeof(qreal); // spawn threads to sum the terms in each block // arbitrarily store the reduction in the b qureg's array if (firstTime) { densmatr_calcInnerProductKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>( a, b, a.numAmpsTotal, b.firstLevelReduction); firstTime = 0; } // sum the block terms else { cudaDeviceSynchronize(); copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>( b.firstLevelReduction, b.secondLevelReduction, valuesPerCUDABlock); cudaDeviceSynchronize(); swapDouble(&(b.firstLevelReduction), &(b.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } qreal innerprod; cudaMemcpy(&innerprod, b.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost); return innerprod; } /** computes either a real or imag term in the inner product */ __global__ void statevec_calcInnerProductKernel( int 
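// Usage sketch (hypothetical qubit choice) for the two routines above: asking
// for the outcome distribution of qubits {0,2} fills outcomeProbs[0..3], since
// numQubits measured qubits produce 1<<numQubits outcomes.
static void exampleMeasureTwoQubitDistribution(Qureg qureg, qreal outcomeProbs[4])
{
    int qubits[2] = {0, 2};  // assumes qureg contains at least 3 qubits
    statevec_calcProbOfAllOutcomes(outcomeProbs, qureg, qubits, 2);
}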
getRealComp, qreal* vecReal1, qreal* vecImag1, qreal* vecReal2, qreal* vecImag2, long long int numTermsToSum, qreal* reducedArray) { long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index >= numTermsToSum) return; // choose whether to calculate the real or imaginary term of the inner product qreal innerProdTerm; if (getRealComp) innerProdTerm = vecReal1[index]*vecReal2[index] + vecImag1[index]*vecImag2[index]; else innerProdTerm = vecReal1[index]*vecImag2[index] - vecImag1[index]*vecReal2[index]; // array of each thread's collected sum term, to be summed extern __shared__ qreal tempReductionArray[]; tempReductionArray[threadIdx.x] = innerProdTerm; __syncthreads(); // every second thread reduces if (threadIdx.x<blockDim.x/2) reduceBlock(tempReductionArray, reducedArray, blockDim.x); } /** Terrible code which unnecessarily individually computes and sums the real and imaginary components of the * inner product, so as to not have to worry about keeping the sums separated during reduction. * Truly disgusting, probably doubles runtime, please fix. * @todo could even do the kernel twice, storing real in bra.reduc and imag in ket.reduc? */ Complex statevec_calcInnerProduct(Qureg bra, Qureg ket) { qreal innerProdReal, innerProdImag; int getRealComp; long long int numValuesToReduce; int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int maxReducedPerLevel; int firstTime; // compute real component of inner product getRealComp = 1; numValuesToReduce = bra.numAmpsPerChunk; maxReducedPerLevel = REDUCE_SHARED_SIZE; firstTime = 1; while (numValuesToReduce > 1) { if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } else { valuesPerCUDABlock = maxReducedPerLevel; numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } sharedMemSize = valuesPerCUDABlock*sizeof(qreal); if (firstTime) { statevec_calcInnerProductKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>( getRealComp, bra.deviceStateVec.real, bra.deviceStateVec.imag, ket.deviceStateVec.real, ket.deviceStateVec.imag, numValuesToReduce, bra.firstLevelReduction); firstTime = 0; } else { cudaDeviceSynchronize(); copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>( bra.firstLevelReduction, bra.secondLevelReduction, valuesPerCUDABlock); cudaDeviceSynchronize(); swapDouble(&(bra.firstLevelReduction), &(bra.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } cudaMemcpy(&innerProdReal, bra.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost); // compute imag component of inner product getRealComp = 0; numValuesToReduce = bra.numAmpsPerChunk; maxReducedPerLevel = REDUCE_SHARED_SIZE; firstTime = 1; while (numValuesToReduce > 1) { if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } else { valuesPerCUDABlock = maxReducedPerLevel; numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } sharedMemSize = valuesPerCUDABlock*sizeof(qreal); if (firstTime) { statevec_calcInnerProductKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>( getRealComp, bra.deviceStateVec.real, bra.deviceStateVec.imag, ket.deviceStateVec.real, ket.deviceStateVec.imag, numValuesToReduce, bra.firstLevelReduction); firstTime = 0; } else { cudaDeviceSynchronize(); copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>( bra.firstLevelReduction, bra.secondLevelReduction, valuesPerCUDABlock); cudaDeviceSynchronize(); 
swapDouble(&(bra.firstLevelReduction), &(bra.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } cudaMemcpy(&innerProdImag, bra.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost); // return complex Complex innerProd; innerProd.real = innerProdReal; innerProd.imag = innerProdImag; return innerProd; } /** computes one term of (vec^*T) dens * vec */ __global__ void densmatr_calcFidelityKernel(Qureg dens, Qureg vec, long long int dim, qreal* reducedArray) { // figure out which density matrix row to consider long long int col; long long int row = blockIdx.x*blockDim.x + threadIdx.x; if (row >= dim) return; qreal* densReal = dens.deviceStateVec.real; qreal* densImag = dens.deviceStateVec.imag; qreal* vecReal = vec.deviceStateVec.real; qreal* vecImag = vec.deviceStateVec.imag; // compute the row-th element of the product dens*vec qreal prodReal = 0; qreal prodImag = 0; for (col=0LL; col < dim; col++) { qreal densElemReal = densReal[dim*col + row]; qreal densElemImag = densImag[dim*col + row]; prodReal += densElemReal*vecReal[col] - densElemImag*vecImag[col]; prodImag += densElemReal*vecImag[col] + densElemImag*vecReal[col]; } // multiply with row-th elem of (vec^*) qreal termReal = prodImag*vecImag[row] + prodReal*vecReal[row]; // imag of every term should be zero, because each is a valid fidelity calc of an eigenstate //qreal termImag = prodImag*vecReal[row] - prodReal*vecImag[row]; extern __shared__ qreal tempReductionArray[]; tempReductionArray[threadIdx.x] = termReal; __syncthreads(); // every second thread reduces if (threadIdx.x<blockDim.x/2) reduceBlock(tempReductionArray, reducedArray, blockDim.x); } qreal densmatr_calcFidelity(Qureg qureg, Qureg pureState) { // we're summing the square of every term in the density matrix long long int densityDim = 1LL << qureg.numQubitsRepresented; long long int numValuesToReduce = densityDim; int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int maxReducedPerLevel = REDUCE_SHARED_SIZE; int firstTime = 1; while (numValuesToReduce > 1) { // need less than one CUDA-BLOCK to reduce if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } // otherwise use only full CUDA-BLOCKS else { valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } // dictates size of reduction array sharedMemSize = valuesPerCUDABlock*sizeof(qreal); // spawn threads to sum the probs in each block // store the reduction in the pureState array if (firstTime) { densmatr_calcFidelityKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>( qureg, pureState, densityDim, pureState.firstLevelReduction); firstTime = 0; // sum the block probs } else { cudaDeviceSynchronize(); copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>( pureState.firstLevelReduction, pureState.secondLevelReduction, valuesPerCUDABlock); cudaDeviceSynchronize(); swapDouble(&(pureState.firstLevelReduction), &(pureState.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } qreal fidelity; cudaMemcpy(&fidelity, pureState.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost); return fidelity; } __global__ void densmatr_calcHilbertSchmidtDistanceSquaredKernel( qreal* aRe, qreal* aIm, qreal* bRe, qreal* bIm, long long int numAmpsToSum, qreal *reducedArray ) { // figure out which density matrix term this thread is assigned long long int index = blockIdx.x*blockDim.x + threadIdx.x; 
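// Host-side reference sketch (illustrative only) of the quantity the two GPU
// reductions above compute: <bra|ket> = sum_j conj(bra_j) * ket_j. Assumes the
// amplitudes have already been copied back with copyStateFromGPU.
static Complex exampleInnerProductReference(Qureg bra, Qureg ket)
{
    Complex prod;
    prod.real = 0;
    prod.imag = 0;
    for (long long int j = 0; j < bra.numAmpsPerChunk; j++) {
        qreal bR = bra.stateVec.real[j], bI = bra.stateVec.imag[j];
        qreal kR = ket.stateVec.real[j], kI = ket.stateVec.imag[j];
        prod.real += bR*kR + bI*kI;  // Re{ conj(bra_j) ket_j }
        prod.imag += bR*kI - bI*kR;  // Im{ conj(bra_j) ket_j }
    }
    return prod;
}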
if (index >= numAmpsToSum) return; // compute this thread's sum term qreal difRe = aRe[index] - bRe[index]; qreal difIm = aIm[index] - bIm[index]; qreal term = difRe*difRe + difIm*difIm; // array of each thread's collected term, to be summed extern __shared__ qreal tempReductionArray[]; tempReductionArray[threadIdx.x] = term; __syncthreads(); // every second thread reduces if (threadIdx.x<blockDim.x/2) reduceBlock(tempReductionArray, reducedArray, blockDim.x); } /* computes sqrt(Tr( (a-b) conjTrans(a-b) ) = sqrt( sum of abs vals of (a-b)) */ qreal densmatr_calcHilbertSchmidtDistance(Qureg a, Qureg b) { // we're summing the square of every term in (a-b) long long int numValuesToReduce = a.numAmpsPerChunk; int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int maxReducedPerLevel = REDUCE_SHARED_SIZE; int firstTime = 1; while (numValuesToReduce > 1) { // need less than one CUDA-BLOCK to reduce if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } // otherwise use only full CUDA-BLOCKS else { valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } // dictates size of reduction array sharedMemSize = valuesPerCUDABlock*sizeof(qreal); // spawn threads to sum the probs in each block (store reduction temp values in a's reduction array) if (firstTime) { densmatr_calcHilbertSchmidtDistanceSquaredKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>( a.deviceStateVec.real, a.deviceStateVec.imag, b.deviceStateVec.real, b.deviceStateVec.imag, numValuesToReduce, a.firstLevelReduction); firstTime = 0; // sum the block probs } else { cudaDeviceSynchronize(); copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>( a.firstLevelReduction, a.secondLevelReduction, valuesPerCUDABlock); cudaDeviceSynchronize(); swapDouble(&(a.firstLevelReduction), &(a.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } qreal trace; cudaMemcpy(&trace, a.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost); qreal sqrtTrace = sqrt(trace); return sqrtTrace; } __global__ void densmatr_calcPurityKernel(qreal* vecReal, qreal* vecImag, long long int numAmpsToSum, qreal *reducedArray) { // figure out which density matrix term this thread is assigned long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index >= numAmpsToSum) return; qreal term = vecReal[index]*vecReal[index] + vecImag[index]*vecImag[index]; // array of each thread's collected probability, to be summed extern __shared__ qreal tempReductionArray[]; tempReductionArray[threadIdx.x] = term; __syncthreads(); // every second thread reduces if (threadIdx.x<blockDim.x/2) reduceBlock(tempReductionArray, reducedArray, blockDim.x); } /** Computes the trace of the density matrix squared */ qreal densmatr_calcPurity(Qureg qureg) { // we're summing the square of every term in the density matrix long long int numValuesToReduce = qureg.numAmpsPerChunk; int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int maxReducedPerLevel = REDUCE_SHARED_SIZE; int firstTime = 1; while (numValuesToReduce > 1) { // need less than one CUDA-BLOCK to reduce if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } // otherwise use only full CUDA-BLOCKS else { valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } // dictates size of reduction array 
sharedMemSize = valuesPerCUDABlock*sizeof(qreal); // spawn threads to sum the probs in each block if (firstTime) { densmatr_calcPurityKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>( qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numValuesToReduce, qureg.firstLevelReduction); firstTime = 0; // sum the block probs } else { cudaDeviceSynchronize(); copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>( qureg.firstLevelReduction, qureg.secondLevelReduction, valuesPerCUDABlock); cudaDeviceSynchronize(); swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } qreal traceDensSquared; cudaMemcpy(&traceDensSquared, qureg.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost); return traceDensSquared; } __global__ void statevec_collapseToKnownProbOutcomeKernel(Qureg qureg, int measureQubit, int outcome, qreal totalProbability) { // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block index; // current index for first half block // ----- measured probability qreal renorm; // probability (returned) value // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity // (good for shared memory parallelism) long long int numTasks=qureg.numAmpsPerChunk>>1; // ---------------------------------------------------------------- // // dimensions // // ---------------------------------------------------------------- // sizeHalfBlock = 1LL << (measureQubit); // number of state vector elements to sum, // and then the number to skip sizeBlock = 2LL * sizeHalfBlock; // size of blocks (pairs of measure and skip entries) // ---------------------------------------------------------------- // // find probability // // ---------------------------------------------------------------- // // // --- task-based shared-memory parallel implementation // renorm=1/sqrt(totalProbability); qreal *stateVecReal = qureg.deviceStateVec.real; qreal *stateVecImag = qureg.deviceStateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; index = thisBlock*sizeBlock + thisTask%sizeHalfBlock; if (outcome==0){ stateVecReal[index]=stateVecReal[index]*renorm; stateVecImag[index]=stateVecImag[index]*renorm; stateVecReal[index+sizeHalfBlock]=0; stateVecImag[index+sizeHalfBlock]=0; } else if (outcome==1){ stateVecReal[index]=0; stateVecImag[index]=0; stateVecReal[index+sizeHalfBlock]=stateVecReal[index+sizeHalfBlock]*renorm; stateVecImag[index+sizeHalfBlock]=stateVecImag[index+sizeHalfBlock]*renorm; } } /* * outcomeProb must accurately be the probability of that qubit outcome in the state-vector, or * else the state-vector will lose normalisation */ void statevec_collapseToKnownProbOutcome(Qureg qureg, int measureQubit, int outcome, qreal outcomeProb) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); statevec_collapseToKnownProbOutcomeKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, measureQubit, outcome, outcomeProb); } /** Maps thread ID to a |..0..><..0..| state and then locates |0><1|, |1><0| and |1><1| */ __global__ void densmatr_collapseToKnownProbOutcomeKernel( qreal outcomeProb, qreal* vecReal, qreal *vecImag, long long int numBasesToVisit, long long int part1, long long int part2, long long int part3, 
long long int rowBit, long long int colBit, long long int desired, long long int undesired) { long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x; if (scanInd >= numBasesToVisit) return; long long int base = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2); // renormalise desired outcome vecReal[base + desired] /= outcomeProb; vecImag[base + desired] /= outcomeProb; // kill undesired outcome vecReal[base + undesired] = 0; vecImag[base + undesired] = 0; // kill |..0..><..1..| states vecReal[base + colBit] = 0; vecImag[base + colBit] = 0; vecReal[base + rowBit] = 0; vecImag[base + rowBit] = 0; } /** This involves finding |...i...><...j...| states and killing those where i!=j */ void densmatr_collapseToKnownProbOutcome(Qureg qureg, int measureQubit, int outcome, qreal outcomeProb) { int rowQubit = measureQubit + qureg.numQubitsRepresented; int colBit = 1LL << measureQubit; int rowBit = 1LL << rowQubit; long long int numBasesToVisit = qureg.numAmpsPerChunk/4; long long int part1 = colBit -1; long long int part2 = (rowBit >> 1) - colBit; long long int part3 = numBasesToVisit - (rowBit >> 1); long long int desired, undesired; if (outcome == 0) { desired = 0; undesired = colBit | rowBit; } else { desired = colBit | rowBit; undesired = 0; } int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numBasesToVisit / (qreal) threadsPerCUDABlock); densmatr_collapseToKnownProbOutcomeKernel<<<CUDABlocks, threadsPerCUDABlock>>>( outcomeProb, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numBasesToVisit, part1, part2, part3, rowBit, colBit, desired, undesired); } __global__ void densmatr_mixDensityMatrixKernel(Qureg combineQureg, qreal otherProb, Qureg otherQureg, long long int numAmpsToVisit) { long long int ampInd = blockIdx.x*blockDim.x + threadIdx.x; if (ampInd >= numAmpsToVisit) return; combineQureg.deviceStateVec.real[ampInd] *= 1-otherProb; combineQureg.deviceStateVec.imag[ampInd] *= 1-otherProb; combineQureg.deviceStateVec.real[ampInd] += otherProb*otherQureg.deviceStateVec.real[ampInd]; combineQureg.deviceStateVec.imag[ampInd] += otherProb*otherQureg.deviceStateVec.imag[ampInd]; } void densmatr_mixDensityMatrix(Qureg combineQureg, qreal otherProb, Qureg otherQureg) { long long int numAmpsToVisit = combineQureg.numAmpsPerChunk; int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); densmatr_mixDensityMatrixKernel<<<CUDABlocks, threadsPerCUDABlock>>>( combineQureg, otherProb, otherQureg, numAmpsToVisit ); } /** Called once for every 4 amplitudes in density matrix * Works by establishing the |..0..><..0..| state (for its given index) then * visiting |..1..><..0..| and |..0..><..1..|. 
Labels |part1 X pa><rt2 NOT(X) part3| * From the brain of Simon Benjamin */ __global__ void densmatr_mixDephasingKernel( qreal fac, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit, long long int part1, long long int part2, long long int part3, long long int colBit, long long int rowBit) { long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x; if (scanInd >= numAmpsToVisit) return; long long int ampInd = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2); vecReal[ampInd + colBit] *= fac; vecImag[ampInd + colBit] *= fac; vecReal[ampInd + rowBit] *= fac; vecImag[ampInd + rowBit] *= fac; } void densmatr_oneQubitDegradeOffDiagonal(Qureg qureg, int targetQubit, qreal dephFac) { long long int numAmpsToVisit = qureg.numAmpsPerChunk/4; int rowQubit = targetQubit + qureg.numQubitsRepresented; long long int colBit = 1LL << targetQubit; long long int rowBit = 1LL << rowQubit; long long int part1 = colBit - 1; long long int part2 = (rowBit >> 1) - colBit; long long int part3 = numAmpsToVisit - (rowBit >> 1); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); densmatr_mixDephasingKernel<<<CUDABlocks, threadsPerCUDABlock>>>( dephFac, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit, part1, part2, part3, colBit, rowBit); } void densmatr_mixDephasing(Qureg qureg, int targetQubit, qreal dephase) { if (dephase == 0) return; qreal dephFac = 1 - dephase; densmatr_oneQubitDegradeOffDiagonal(qureg, targetQubit, dephFac); } /** Called 12 times for every 16 amplitudes in density matrix * Each sums from the |..0..0..><..0..0..| index to visit either * |..0..0..><..0..1..|, |..0..0..><..1..0..|, |..0..0..><..1..1..|, |..0..1..><..0..0..| * etc and so on to |..1..1..><..1..0|. Labels |part1 0 part2 0 par><t3 0 part4 0 part5|. * From the brain of Simon Benjamin */ __global__ void densmatr_mixTwoQubitDephasingKernel( qreal fac, qreal* vecReal, qreal *vecImag, long long int numBackgroundStates, long long int numAmpsToVisit, long long int part1, long long int part2, long long int part3, long long int part4, long long int part5, long long int colBit1, long long int rowBit1, long long int colBit2, long long int rowBit2) { long long int outerInd = blockIdx.x*blockDim.x + threadIdx.x; if (outerInd >= numAmpsToVisit) return; // sets meta in 1...14 excluding 5, 10, creating bit string DCBA for |..D..C..><..B..A| int meta = 1 + (outerInd/numBackgroundStates); if (meta > 4) meta++; if (meta > 9) meta++; long long int shift = rowBit2*((meta>>3)%2) + rowBit1*((meta>>2)%2) + colBit2*((meta>>1)%2) + colBit1*(meta%2); long long int scanInd = outerInd % numBackgroundStates; long long int stateInd = ( shift + (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2) + ((scanInd&part4)<<3) + ((scanInd&part5)<<4)); vecReal[stateInd] *= fac; vecImag[stateInd] *= fac; } // @TODO is separating these 12 amplitudes really faster than letting every 16th base modify 12 elems? 
void densmatr_mixTwoQubitDephasing(Qureg qureg, int qubit1, int qubit2, qreal dephase) { if (dephase == 0) return; // assumes qubit2 > qubit1 int rowQubit1 = qubit1 + qureg.numQubitsRepresented; int rowQubit2 = qubit2 + qureg.numQubitsRepresented; long long int colBit1 = 1LL << qubit1; long long int rowBit1 = 1LL << rowQubit1; long long int colBit2 = 1LL << qubit2; long long int rowBit2 = 1LL << rowQubit2; long long int part1 = colBit1 - 1; long long int part2 = (colBit2 >> 1) - colBit1; long long int part3 = (rowBit1 >> 2) - (colBit2 >> 1); long long int part4 = (rowBit2 >> 3) - (rowBit1 >> 2); long long int part5 = (qureg.numAmpsPerChunk/16) - (rowBit2 >> 3); qreal dephFac = 1 - dephase; // refers to states |a 0 b 0 c><d 0 e 0 f| (target qubits are fixed) long long int numBackgroundStates = qureg.numAmpsPerChunk/16; // 12 of these states experience dephasing long long int numAmpsToVisit = 12 * numBackgroundStates; int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); densmatr_mixTwoQubitDephasingKernel<<<CUDABlocks, threadsPerCUDABlock>>>( dephFac, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numBackgroundStates, numAmpsToVisit, part1, part2, part3, part4, part5, colBit1, rowBit1, colBit2, rowBit2); } /** Works like mixDephasing but modifies every other element, and elements are averaged in pairs */ __global__ void densmatr_mixDepolarisingKernel( qreal depolLevel, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit, long long int part1, long long int part2, long long int part3, long long int bothBits) { long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x; if (scanInd >= numAmpsToVisit) return; long long int baseInd = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2); long long int targetInd = baseInd + bothBits; qreal realAvDepol = depolLevel * 0.5 * (vecReal[baseInd] + vecReal[targetInd]); qreal imagAvDepol = depolLevel * 0.5 * (vecImag[baseInd] + vecImag[targetInd]); vecReal[baseInd] *= 1 - depolLevel; vecImag[baseInd] *= 1 - depolLevel; vecReal[targetInd] *= 1 - depolLevel; vecImag[targetInd] *= 1 - depolLevel; vecReal[baseInd] += realAvDepol; vecImag[baseInd] += imagAvDepol; vecReal[targetInd] += realAvDepol; vecImag[targetInd] += imagAvDepol; } /** Works like mixDephasing but modifies every other element, and elements are averaged in pairs */ __global__ void densmatr_mixDampingKernel( qreal damping, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit, long long int part1, long long int part2, long long int part3, long long int bothBits) { long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x; if (scanInd >= numAmpsToVisit) return; long long int baseInd = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2); long long int targetInd = baseInd + bothBits; qreal realAvDepol = damping * ( vecReal[targetInd]); qreal imagAvDepol = damping * ( vecImag[targetInd]); vecReal[targetInd] *= 1 - damping; vecImag[targetInd] *= 1 - damping; vecReal[baseInd] += realAvDepol; vecImag[baseInd] += imagAvDepol; } void densmatr_mixDepolarising(Qureg qureg, int targetQubit, qreal depolLevel) { if (depolLevel == 0) return; densmatr_mixDephasing(qureg, targetQubit, depolLevel); long long int numAmpsToVisit = qureg.numAmpsPerChunk/4; int rowQubit = targetQubit + qureg.numQubitsRepresented; long long int colBit = 1LL << targetQubit; long long int rowBit = 1LL << rowQubit; long long int bothBits = colBit | rowBit; long long int part1 = colBit - 1; long long int part2 = (rowBit 
>> 1) - colBit; long long int part3 = numAmpsToVisit - (rowBit >> 1); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); densmatr_mixDepolarisingKernel<<<CUDABlocks, threadsPerCUDABlock>>>( depolLevel, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit, part1, part2, part3, bothBits); } void densmatr_mixDamping(Qureg qureg, int targetQubit, qreal damping) { if (damping == 0) return; qreal dephase = sqrt(1-damping); densmatr_oneQubitDegradeOffDiagonal(qureg, targetQubit, dephase); long long int numAmpsToVisit = qureg.numAmpsPerChunk/4; int rowQubit = targetQubit + qureg.numQubitsRepresented; long long int colBit = 1LL << targetQubit; long long int rowBit = 1LL << rowQubit; long long int bothBits = colBit | rowBit; long long int part1 = colBit - 1; long long int part2 = (rowBit >> 1) - colBit; long long int part3 = numAmpsToVisit - (rowBit >> 1); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); densmatr_mixDampingKernel<<<CUDABlocks, threadsPerCUDABlock>>>( damping, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit, part1, part2, part3, bothBits); } /** Called once for every 16 amplitudes */ __global__ void densmatr_mixTwoQubitDepolarisingKernel( qreal depolLevel, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit, long long int part1, long long int part2, long long int part3, long long int part4, long long int part5, long long int rowCol1, long long int rowCol2) { long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x; if (scanInd >= numAmpsToVisit) return; // index of |..0..0..><..0..0| long long int ind00 = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2) + ((scanInd&part4)<<3) + ((scanInd&part5)<<4); long long int ind01 = ind00 + rowCol1; long long int ind10 = ind00 + rowCol2; long long int ind11 = ind00 + rowCol1 + rowCol2; qreal realAvDepol = depolLevel * 0.25 * ( vecReal[ind00] + vecReal[ind01] + vecReal[ind10] + vecReal[ind11]); qreal imagAvDepol = depolLevel * 0.25 * ( vecImag[ind00] + vecImag[ind01] + vecImag[ind10] + vecImag[ind11]); qreal retain = 1 - depolLevel; vecReal[ind00] *= retain; vecImag[ind00] *= retain; vecReal[ind01] *= retain; vecImag[ind01] *= retain; vecReal[ind10] *= retain; vecImag[ind10] *= retain; vecReal[ind11] *= retain; vecImag[ind11] *= retain; vecReal[ind00] += realAvDepol; vecImag[ind00] += imagAvDepol; vecReal[ind01] += realAvDepol; vecImag[ind01] += imagAvDepol; vecReal[ind10] += realAvDepol; vecImag[ind10] += imagAvDepol; vecReal[ind11] += realAvDepol; vecImag[ind11] += imagAvDepol; } void densmatr_mixTwoQubitDepolarising(Qureg qureg, int qubit1, int qubit2, qreal depolLevel) { if (depolLevel == 0) return; // assumes qubit2 > qubit1 densmatr_mixTwoQubitDephasing(qureg, qubit1, qubit2, depolLevel); int rowQubit1 = qubit1 + qureg.numQubitsRepresented; int rowQubit2 = qubit2 + qureg.numQubitsRepresented; long long int colBit1 = 1LL << qubit1; long long int rowBit1 = 1LL << rowQubit1; long long int colBit2 = 1LL << qubit2; long long int rowBit2 = 1LL << rowQubit2; long long int rowCol1 = colBit1 | rowBit1; long long int rowCol2 = colBit2 | rowBit2; long long int numAmpsToVisit = qureg.numAmpsPerChunk/16; long long int part1 = colBit1 - 1; long long int part2 = (colBit2 >> 1) - colBit1; long long int part3 = (rowBit1 >> 2) - (colBit2 >> 1); long long int part4 = (rowBit2 >> 3) - (rowBit1 >> 2); long long int part5 = numAmpsToVisit - 
(rowBit2 >> 3); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); densmatr_mixTwoQubitDepolarisingKernel<<<CUDABlocks, threadsPerCUDABlock>>>( depolLevel, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit, part1, part2, part3, part4, part5, rowCol1, rowCol2); } __global__ void statevec_setWeightedQuregKernel(Complex fac1, Qureg qureg1, Complex fac2, Qureg qureg2, Complex facOut, Qureg out) { long long int ampInd = blockIdx.x*blockDim.x + threadIdx.x; long long int numAmpsToVisit = qureg1.numAmpsPerChunk; if (ampInd >= numAmpsToVisit) return; qreal *vecRe1 = qureg1.deviceStateVec.real; qreal *vecIm1 = qureg1.deviceStateVec.imag; qreal *vecRe2 = qureg2.deviceStateVec.real; qreal *vecIm2 = qureg2.deviceStateVec.imag; qreal *vecReOut = out.deviceStateVec.real; qreal *vecImOut = out.deviceStateVec.imag; qreal facRe1 = fac1.real; qreal facIm1 = fac1.imag; qreal facRe2 = fac2.real; qreal facIm2 = fac2.imag; qreal facReOut = facOut.real; qreal facImOut = facOut.imag; qreal re1,im1, re2,im2, reOut,imOut; long long int index = ampInd; re1 = vecRe1[index]; im1 = vecIm1[index]; re2 = vecRe2[index]; im2 = vecIm2[index]; reOut = vecReOut[index]; imOut = vecImOut[index]; vecReOut[index] = (facReOut*reOut - facImOut*imOut) + (facRe1*re1 - facIm1*im1) + (facRe2*re2 - facIm2*im2); vecImOut[index] = (facReOut*imOut + facImOut*reOut) + (facRe1*im1 + facIm1*re1) + (facRe2*im2 + facIm2*re2); } void statevec_setWeightedQureg(Complex fac1, Qureg qureg1, Complex fac2, Qureg qureg2, Complex facOut, Qureg out) { long long int numAmpsToVisit = qureg1.numAmpsPerChunk; int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); statevec_setWeightedQuregKernel<<<CUDABlocks, threadsPerCUDABlock>>>( fac1, qureg1, fac2, qureg2, facOut, out ); } __global__ void statevec_applyDiagonalOpKernel(Qureg qureg, DiagonalOp op) { // each thread modifies one value; a wasteful and inefficient strategy long long int numTasks = qureg.numAmpsPerChunk; long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask >= numTasks) return; qreal* stateRe = qureg.deviceStateVec.real; qreal* stateIm = qureg.deviceStateVec.imag; qreal* opRe = op.deviceOperator.real; qreal* opIm = op.deviceOperator.imag; qreal a = stateRe[thisTask]; qreal b = stateIm[thisTask]; qreal c = opRe[thisTask]; qreal d = opIm[thisTask]; // (a + b i)(c + d i) = (a c - b d) + i (a d + b c) stateRe[thisTask] = a*c - b*d; stateIm[thisTask] = a*d + b*c; } void statevec_applyDiagonalOp(Qureg qureg, DiagonalOp op) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); statevec_applyDiagonalOpKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, op); } __global__ void densmatr_applyDiagonalOpKernel(Qureg qureg, DiagonalOp op) { // each thread modifies one value; a wasteful and inefficient strategy long long int numTasks = qureg.numAmpsPerChunk; long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask >= numTasks) return; qreal* stateRe = qureg.deviceStateVec.real; qreal* stateIm = qureg.deviceStateVec.imag; qreal* opRe = op.deviceOperator.real; qreal* opIm = op.deviceOperator.imag; int opDim = (1 << op.numQubits); qreal a = stateRe[thisTask]; qreal b = stateIm[thisTask]; qreal c = opRe[thisTask % opDim]; qreal d = opIm[thisTask % opDim]; // (a + b i)(c + d i) = (a c - b d) + i (a d + b c) stateRe[thisTask] = 
a*c - b*d; stateIm[thisTask] = a*d + b*c; } void densmatr_applyDiagonalOp(Qureg qureg, DiagonalOp op) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); densmatr_applyDiagonalOpKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, op); } __global__ void statevec_applySubDiagonalOpKernel(Qureg qureg, int* targets, int numTargets, qreal* opReals, qreal* opImags, int conjFac) { long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=qureg.numAmpsPerChunk) return; long long int v = 0; for (int t=0; t<numTargets; t++) v |= extractBit(targets[t], index) << t; qreal elemRe = opReals[v]; qreal elemIm = opImags[v] * conjFac; qreal* stateRe = qureg.deviceStateVec.real; qreal* stateIm = qureg.deviceStateVec.imag; qreal ampRe = stateRe[index]; qreal ampIm = stateIm[index]; // (a + b i)(c + d i) = (a c - b d) + i (a d + b c) stateRe[index] = ampRe*elemRe - ampIm*elemIm; stateIm[index] = ampRe*elemIm + ampIm*elemRe; } void statevec_applySubDiagonalOp(Qureg qureg, int* targets, SubDiagonalOp op, int conj) { // copy targets to GPU memory int* d_targets; int numTargets = op.numQubits; size_t memTargets = numTargets * sizeof *d_targets; cudaMalloc(&d_targets, memTargets); cudaMemcpy(d_targets, targets, memTargets, cudaMemcpyHostToDevice); // copy op to GPU memory qreal* d_opReal; qreal* d_opImag; size_t memOp = op.numElems * sizeof *d_opReal; cudaMalloc(&d_opReal, memOp); cudaMalloc(&d_opImag, memOp); cudaMemcpy(d_opReal, op.real, memOp, cudaMemcpyHostToDevice); cudaMemcpy(d_opImag, op.imag, memOp, cudaMemcpyHostToDevice); // determine factor of imaginary components int conjFac = 1; if (conj) conjFac = -1; // launch kernels int threadsPerCUDABlock = 128; int CUDABlocks = ceil(qureg.numAmpsPerChunk / (qreal) threadsPerCUDABlock); statevec_applySubDiagonalOpKernel<<<CUDABlocks,threadsPerCUDABlock>>>( qureg, d_targets, numTargets, d_opReal, d_opImag, conjFac); // free temporary GPU memory cudaFree(d_targets); cudaFree(d_opReal); cudaFree(d_opImag); } /** computes either a real or imag term of |vec_i|^2 op_i */ __global__ void statevec_calcExpecDiagonalOpKernel( int getRealComp, qreal* vecReal, qreal* vecImag, qreal* opReal, qreal* opImag, long long int numTermsToSum, qreal* reducedArray) { long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index >= numTermsToSum) return; qreal vecAbs = vecReal[index]*vecReal[index] + vecImag[index]*vecImag[index]; // choose whether to calculate the real or imaginary term of the expec term qreal expecVal; if (getRealComp) expecVal = vecAbs * opReal[index]; else expecVal = vecAbs * opImag[index]; // array of each thread's collected sum term, to be summed extern __shared__ qreal tempReductionArray[]; tempReductionArray[threadIdx.x] = expecVal; __syncthreads(); // every second thread reduces if (threadIdx.x<blockDim.x/2) reduceBlock(tempReductionArray, reducedArray, blockDim.x); } Complex statevec_calcExpecDiagonalOp(Qureg qureg, DiagonalOp op) { /* @TODO: remove all this reduction boilerplate from QuEST GPU * (e.g. a func which accepts a pointer to do every-value reduction?) 
*/ qreal expecReal, expecImag; int getRealComp; long long int numValuesToReduce; int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int maxReducedPerLevel; int firstTime; // compute real component of inner product getRealComp = 1; numValuesToReduce = qureg.numAmpsPerChunk; maxReducedPerLevel = REDUCE_SHARED_SIZE; firstTime = 1; while (numValuesToReduce > 1) { if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } else { valuesPerCUDABlock = maxReducedPerLevel; numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } sharedMemSize = valuesPerCUDABlock*sizeof(qreal); if (firstTime) { statevec_calcExpecDiagonalOpKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>( getRealComp, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, op.deviceOperator.real, op.deviceOperator.imag, numValuesToReduce, qureg.firstLevelReduction); firstTime = 0; } else { cudaDeviceSynchronize(); copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>( qureg.firstLevelReduction, qureg.secondLevelReduction, valuesPerCUDABlock); cudaDeviceSynchronize(); swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } cudaMemcpy(&expecReal, qureg.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost); // compute imag component of inner product getRealComp = 0; numValuesToReduce = qureg.numAmpsPerChunk; maxReducedPerLevel = REDUCE_SHARED_SIZE; firstTime = 1; while (numValuesToReduce > 1) { if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } else { valuesPerCUDABlock = maxReducedPerLevel; numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } sharedMemSize = valuesPerCUDABlock*sizeof(qreal); if (firstTime) { statevec_calcExpecDiagonalOpKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>( getRealComp, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, op.deviceOperator.real, op.deviceOperator.imag, numValuesToReduce, qureg.firstLevelReduction); firstTime = 0; } else { cudaDeviceSynchronize(); copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>( qureg.firstLevelReduction, qureg.secondLevelReduction, valuesPerCUDABlock); cudaDeviceSynchronize(); swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } cudaMemcpy(&expecImag, qureg.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost); // return complex Complex expecVal; expecVal.real = expecReal; expecVal.imag = expecImag; return expecVal; } __global__ void densmatr_calcExpecDiagonalOpKernel( int getRealComp, qreal* matReal, qreal* matImag, qreal* opReal, qreal* opImag, int numQubits, long long int numTermsToSum, qreal* reducedArray) { /** if the thread represents a diagonal op, then it computes either a * real or imag term of matr_{ii} op_i. 
Otherwise, it writes a 0 to the * reduction array */ // index will identy one of the 2^Q diagonals to be summed long long int matInd = blockIdx.x*blockDim.x + threadIdx.x; if (matInd >= numTermsToSum) return; long long int diagSpacing = (1LL << numQubits) + 1LL; int isDiag = ((matInd % diagSpacing) == 0); long long int opInd = matInd / diagSpacing; qreal val = 0; if (isDiag) { qreal matRe = matReal[matInd]; qreal matIm = matImag[matInd]; qreal opRe = opReal[opInd]; qreal opIm = opImag[opInd]; // (matRe + matIm i)(opRe + opIm i) = // (matRe opRe - matIm opIm) + i (matRe opIm + matIm opRe) if (getRealComp) val = matRe * opRe - matIm * opIm; else val = matRe * opIm + matIm * opRe; } // array of each thread's collected sum term, to be summed extern __shared__ qreal tempReductionArray[]; tempReductionArray[threadIdx.x] = val; __syncthreads(); // every second thread reduces if (threadIdx.x<blockDim.x/2) reduceBlock(tempReductionArray, reducedArray, blockDim.x); } Complex densmatr_calcExpecDiagonalOp(Qureg qureg, DiagonalOp op) { /* @TODO: remove all this reduction boilerplate from QuEST GPU * (e.g. a func which accepts a pointer to do every-value reduction?) */ qreal expecReal, expecImag; int getRealComp; long long int numValuesToReduce; int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int maxReducedPerLevel; int firstTime; // compute real component of inner product getRealComp = 1; numValuesToReduce = qureg.numAmpsPerChunk; maxReducedPerLevel = REDUCE_SHARED_SIZE; firstTime = 1; while (numValuesToReduce > 1) { if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } else { valuesPerCUDABlock = maxReducedPerLevel; numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } sharedMemSize = valuesPerCUDABlock*sizeof(qreal); if (firstTime) { densmatr_calcExpecDiagonalOpKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>( getRealComp, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, op.deviceOperator.real, op.deviceOperator.imag, op.numQubits, numValuesToReduce, qureg.firstLevelReduction); firstTime = 0; } else { cudaDeviceSynchronize(); copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>( qureg.firstLevelReduction, qureg.secondLevelReduction, valuesPerCUDABlock); cudaDeviceSynchronize(); swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } cudaMemcpy(&expecReal, qureg.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost); // compute imag component of inner product getRealComp = 0; numValuesToReduce = qureg.numAmpsPerChunk; maxReducedPerLevel = REDUCE_SHARED_SIZE; firstTime = 1; while (numValuesToReduce > 1) { if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } else { valuesPerCUDABlock = maxReducedPerLevel; numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } sharedMemSize = valuesPerCUDABlock*sizeof(qreal); if (firstTime) { densmatr_calcExpecDiagonalOpKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>( getRealComp, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, op.deviceOperator.real, op.deviceOperator.imag, op.numQubits, numValuesToReduce, qureg.firstLevelReduction); firstTime = 0; } else { cudaDeviceSynchronize(); copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>( qureg.firstLevelReduction, qureg.secondLevelReduction, valuesPerCUDABlock); cudaDeviceSynchronize(); 
swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } cudaMemcpy(&expecImag, qureg.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost); // return complex Complex expecVal; expecVal.real = expecReal; expecVal.imag = expecImag; return expecVal; } void agnostic_setDiagonalOpElems(DiagonalOp op, long long int startInd, qreal* real, qreal* imag, long long int numElems) { // update both RAM and VRAM, for consistency memcpy(&op.real[startInd], real, numElems * sizeof(qreal)); memcpy(&op.imag[startInd], imag, numElems * sizeof(qreal)); cudaDeviceSynchronize(); cudaMemcpy( op.deviceOperator.real + startInd, real, numElems * sizeof(*(op.deviceOperator.real)), cudaMemcpyHostToDevice); cudaMemcpy( op.deviceOperator.imag + startInd, imag, numElems * sizeof(*(op.deviceOperator.imag)), cudaMemcpyHostToDevice); } __global__ void statevec_applyPhaseFuncOverridesKernel( Qureg qureg, int* qubits, int numQubits, enum bitEncoding encoding, qreal* coeffs, qreal* exponents, int numTerms, long long int* overrideInds, qreal* overridePhases, int numOverrides, int conj ) { long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=qureg.numAmpsPerChunk) return; // determine global amplitude index (non-distributed, so it's just local index) long long int globalAmpInd = index; // determine phase index of {qubits} long long int phaseInd = 0LL; if (encoding == UNSIGNED) { for (int q=0; q<numQubits; q++) phaseInd += (1LL << q) * extractBit(qubits[q], globalAmpInd); } else if (encoding == TWOS_COMPLEMENT) { for (int q=0; q<numQubits-1; q++) // use final qubit to indicate sign phaseInd += (1LL << q) * extractBit(qubits[q], globalAmpInd); if (extractBit(qubits[numQubits-1], globalAmpInd) == 1) phaseInd -= (1LL << (numQubits-1)); } // determine if this phase index has an overriden value (i < numOverrides) int i; for (i=0; i<numOverrides; i++) if (phaseInd == overrideInds[i]) break; // determine phase from {coeffs}, {exponents} (unless overriden) qreal phase = 0; if (i < numOverrides) phase = overridePhases[i]; else for (int t=0; t<numTerms; t++) phase += coeffs[t] * pow((qreal) phaseInd, (qreal) exponents[t]); // negate phase to conjugate operator if (conj) phase *= -1; // modify amp to amp * exp(i phase) qreal c = cos(phase); qreal s = sin(phase); qreal re = qureg.deviceStateVec.real[index]; qreal im = qureg.deviceStateVec.imag[index]; // = {re[amp] cos(phase) - im[amp] sin(phase)} + i {re[amp] sin(phase) + im[amp] cos(phase)} qureg.deviceStateVec.real[index] = re*c - im*s; qureg.deviceStateVec.imag[index] = re*s + im*c; } void statevec_applyPhaseFuncOverrides( Qureg qureg, int* qubits, int numQubits, enum bitEncoding encoding, qreal* coeffs, qreal* exponents, int numTerms, long long int* overrideInds, qreal* overridePhases, int numOverrides, int conj ) { // allocate device space for global list of {qubits}, {coeffs}, {exponents}, {overrideInds} and {overridePhases} int* d_qubits; size_t mem_qubits = numQubits * sizeof *d_qubits; qreal* d_coeffs; size_t mem_terms = numTerms * sizeof *d_coeffs; qreal* d_exponents; long long int* d_overrideInds; size_t mem_inds = numOverrides * sizeof *d_overrideInds; qreal* d_overridePhases; size_t mem_phas = numOverrides * sizeof *d_overridePhases; cudaMalloc(&d_qubits, mem_qubits); cudaMemcpy(d_qubits, qubits, mem_qubits, cudaMemcpyHostToDevice); cudaMalloc(&d_coeffs, mem_terms); cudaMemcpy(d_coeffs, coeffs, mem_terms, cudaMemcpyHostToDevice); cudaMalloc(&d_exponents, mem_terms); 
cudaMemcpy(d_exponents, exponents, mem_terms, cudaMemcpyHostToDevice); cudaMalloc(&d_overrideInds, mem_inds); cudaMemcpy(d_overrideInds, overrideInds, mem_inds, cudaMemcpyHostToDevice); cudaMalloc(&d_overridePhases,mem_phas); cudaMemcpy(d_overridePhases, overridePhases, mem_phas, cudaMemcpyHostToDevice); // call kernel int threadsPerCUDABlock = 128; int CUDABlocks = ceil((qreal) qureg.numAmpsPerChunk / threadsPerCUDABlock); statevec_applyPhaseFuncOverridesKernel<<<CUDABlocks,threadsPerCUDABlock>>>( qureg, d_qubits, numQubits, encoding, d_coeffs, d_exponents, numTerms, d_overrideInds, d_overridePhases, numOverrides, conj); // cleanup device memory cudaFree(d_qubits); cudaFree(d_coeffs); cudaFree(d_exponents); cudaFree(d_overrideInds); cudaFree(d_overridePhases); } __global__ void statevec_applyMultiVarPhaseFuncOverridesKernel( Qureg qureg, int* qubits, int* numQubitsPerReg, int numRegs, enum bitEncoding encoding, qreal* coeffs, qreal* exponents, int* numTermsPerReg, long long int* overrideInds, qreal* overridePhases, int numOverrides, long long int *phaseInds, int conj ) { long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=qureg.numAmpsPerChunk) return; // determine global amplitude index (non-distributed, so it's just local index) long long int globalAmpInd = index; /* * each thread needs to write to a local: * long long int phaseInds[numRegs]; * but instead has access to shared array phaseInds, with below stride and offset */ size_t stride = gridDim.x*blockDim.x; size_t offset = blockIdx.x*blockDim.x + threadIdx.x; // determine phase indices int flatInd = 0; if (encoding == UNSIGNED) { for (int r=0; r<numRegs; r++) { phaseInds[r*stride+offset] = 0LL; for (int q=0; q<numQubitsPerReg[r]; q++) phaseInds[r*stride+offset] += (1LL << q) * extractBit(qubits[flatInd++], globalAmpInd); } } else if (encoding == TWOS_COMPLEMENT) { for (int r=0; r<numRegs; r++) { phaseInds[r*stride+offset] = 0LL; for (int q=0; q<numQubitsPerReg[r]-1; q++) phaseInds[r*stride+offset] += (1LL << q) * extractBit(qubits[flatInd++], globalAmpInd); // use final qubit to indicate sign if (extractBit(qubits[flatInd++], globalAmpInd) == 1) phaseInds[r*stride+offset] -= (1LL << (numQubitsPerReg[r]-1)); } } // determine if this phase index has an overriden value (i < numOverrides) int i; for (i=0; i<numOverrides; i++) { int found = 1; for (int r=0; r<numRegs; r++) { if (phaseInds[r*stride+offset] != overrideInds[i*numRegs+r]) { found = 0; break; } } if (found) break; } // compute the phase (unless overriden) qreal phase = 0; if (i < numOverrides) phase = overridePhases[i]; else { flatInd = 0; for (int r=0; r<numRegs; r++) { for (int t=0; t<numTermsPerReg[r]; t++) { phase += coeffs[flatInd] * pow((qreal) phaseInds[r*stride+offset], (qreal) exponents[flatInd]); flatInd++; } } } // negate phase to conjugate operator if (conj) phase *= -1; // modify amp to amp * exp(i phase) qreal c = cos(phase); qreal s = sin(phase); qreal re = qureg.deviceStateVec.real[index]; qreal im = qureg.deviceStateVec.imag[index]; // = {re[amp] cos(phase) - im[amp] sin(phase)} + i {re[amp] sin(phase) + im[amp] cos(phase)} qureg.deviceStateVec.real[index] = re*c - im*s; qureg.deviceStateVec.imag[index] = re*s + im*c; } void statevec_applyMultiVarPhaseFuncOverrides( Qureg qureg, int* qubits, int* numQubitsPerReg, int numRegs, enum bitEncoding encoding, qreal* coeffs, qreal* exponents, int* numTermsPerReg, long long int* overrideInds, qreal* overridePhases, int numOverrides, int conj ) { // determine size of arrays, for cloning into GPU 
memory size_t mem_numQubitsPerReg = numRegs * sizeof *numQubitsPerReg; size_t mem_numTermsPerReg = numRegs * sizeof *numTermsPerReg; size_t mem_overridePhases = numOverrides * sizeof *overridePhases; size_t mem_overrideInds = numOverrides * numRegs * sizeof *overrideInds; size_t mem_qubits = 0; size_t mem_coeffs = 0; size_t mem_exponents = 0; for (int r=0; r<numRegs; r++) { mem_qubits += numQubitsPerReg[r] * sizeof *qubits; mem_coeffs += numTermsPerReg[r] * sizeof *coeffs; mem_exponents += numTermsPerReg[r] * sizeof *exponents; } // allocate global GPU memory int* d_qubits; cudaMalloc(&d_qubits, mem_qubits); qreal* d_coeffs; cudaMalloc(&d_coeffs, mem_coeffs); qreal* d_exponents; cudaMalloc(&d_exponents, mem_exponents); int* d_numQubitsPerReg; cudaMalloc(&d_numQubitsPerReg, mem_numQubitsPerReg); int* d_numTermsPerReg; cudaMalloc(&d_numTermsPerReg, mem_numTermsPerReg); long long int* d_overrideInds; cudaMalloc(&d_overrideInds, mem_overrideInds); qreal* d_overridePhases; cudaMalloc(&d_overridePhases, mem_overridePhases); // copy function args into GPU memory cudaMemcpy(d_qubits, qubits, mem_qubits, cudaMemcpyHostToDevice); cudaMemcpy(d_coeffs, coeffs, mem_coeffs, cudaMemcpyHostToDevice); cudaMemcpy(d_exponents, exponents, mem_exponents, cudaMemcpyHostToDevice); cudaMemcpy(d_numQubitsPerReg, numQubitsPerReg, mem_numQubitsPerReg, cudaMemcpyHostToDevice); cudaMemcpy(d_numTermsPerReg, numTermsPerReg, mem_numTermsPerReg, cudaMemcpyHostToDevice); cudaMemcpy(d_overrideInds, overrideInds, mem_overrideInds, cudaMemcpyHostToDevice); cudaMemcpy(d_overridePhases, overridePhases, mem_overridePhases, cudaMemcpyHostToDevice); int threadsPerCUDABlock = 128; int CUDABlocks = ceil((qreal) qureg.numAmpsPerChunk / threadsPerCUDABlock); // allocate thread-local working space {phaseInds} long long int *d_phaseInds; size_t gridSize = (size_t) threadsPerCUDABlock * CUDABlocks; cudaMalloc(&d_phaseInds, numRegs*gridSize * sizeof *d_phaseInds); // call kernel statevec_applyMultiVarPhaseFuncOverridesKernel<<<CUDABlocks,threadsPerCUDABlock>>>( qureg, d_qubits, d_numQubitsPerReg, numRegs, encoding, d_coeffs, d_exponents, d_numTermsPerReg, d_overrideInds, d_overridePhases, numOverrides, d_phaseInds, conj); // free device memory cudaFree(d_qubits); cudaFree(d_coeffs); cudaFree(d_exponents); cudaFree(d_numQubitsPerReg); cudaFree(d_numTermsPerReg); cudaFree(d_overrideInds); cudaFree(d_overridePhases); cudaFree(d_phaseInds); } __global__ void statevec_applyParamNamedPhaseFuncOverridesKernel( Qureg qureg, int* qubits, int* numQubitsPerReg, int numRegs, enum bitEncoding encoding, enum phaseFunc phaseFuncName, qreal* params, int numParams, long long int* overrideInds, qreal* overridePhases, int numOverrides, long long int* phaseInds, int conj ) { long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=qureg.numAmpsPerChunk) return; // determine global amplitude index (non-distributed, so it's just local index) long long int globalAmpInd = index; /* * each thread needs to write to a local: * long long int phaseInds[numRegs]; * but instead has access to shared array phaseInds, with below stride and offset */ size_t stride = gridDim.x*blockDim.x; size_t offset = blockIdx.x*blockDim.x + threadIdx.x; // determine phase indices if (encoding == UNSIGNED) { int flatInd = 0; for (int r=0; r<numRegs; r++) { phaseInds[r*stride+offset] = 0LL; for (int q=0; q<numQubitsPerReg[r]; q++) phaseInds[r*stride+offset] += (1LL << q) * extractBit(qubits[flatInd++], globalAmpInd); } } else if (encoding == TWOS_COMPLEMENT) { int flatInd = 
0; for (int r=0; r<numRegs; r++) { phaseInds[r*stride+offset] = 0LL; for (int q=0; q<numQubitsPerReg[r]-1; q++) phaseInds[r*stride+offset] += (1LL << q) * extractBit(qubits[flatInd++], globalAmpInd); // use final qubit to indicate sign if (extractBit(qubits[flatInd++], globalAmpInd) == 1) phaseInds[r*stride+offset] -= (1LL << (numQubitsPerReg[r]-1)); } } // determine if this phase index has an overriden value (i < numOverrides) int i; for (i=0; i<numOverrides; i++) { int found = 1; for (int r=0; r<numRegs; r++) { if (phaseInds[r*stride+offset] != overrideInds[i*numRegs+r]) { found = 0; break; } } if (found) break; } // compute the phase (unless overriden) qreal phase = 0; if (i < numOverrides) phase = overridePhases[i]; else { // compute norm related phases if (phaseFuncName == NORM || phaseFuncName == INVERSE_NORM || phaseFuncName == SCALED_NORM || phaseFuncName == SCALED_INVERSE_NORM || phaseFuncName == SCALED_INVERSE_SHIFTED_NORM) { qreal norm = 0; if (phaseFuncName == SCALED_INVERSE_SHIFTED_NORM) { for (int r=0; r<numRegs; r++) { qreal dif = phaseInds[r*stride+offset] - params[2+r]; norm += dif*dif; } } else for (int r=0; r<numRegs; r++) norm += phaseInds[r*stride+offset]*phaseInds[r*stride+offset]; norm = sqrt(norm); if (phaseFuncName == NORM) phase = norm; else if (phaseFuncName == INVERSE_NORM) phase = (norm == 0.)? params[0] : 1/norm; // smallest non-zero norm is 1 else if (phaseFuncName == SCALED_NORM) phase = params[0] * norm; else if (phaseFuncName == SCALED_INVERSE_NORM || phaseFuncName == SCALED_INVERSE_SHIFTED_NORM) phase = (norm <= REAL_EPS)? params[1] : params[0] / norm; // unless shifted closer to zero } // compute product related phases else if (phaseFuncName == PRODUCT || phaseFuncName == INVERSE_PRODUCT || phaseFuncName == SCALED_PRODUCT || phaseFuncName == SCALED_INVERSE_PRODUCT) { qreal prod = 1; for (int r=0; r<numRegs; r++) prod *= phaseInds[r*stride+offset]; if (phaseFuncName == PRODUCT) phase = prod; else if (phaseFuncName == INVERSE_PRODUCT) phase = (prod == 0.)? params[0] : 1/prod; // smallest non-zero prod is +- 1 else if (phaseFuncName == SCALED_PRODUCT) phase = params[0] * prod; else if (phaseFuncName == SCALED_INVERSE_PRODUCT) phase = (prod == 0.)? params[1] : params[0] / prod; } // compute Euclidean distance related phases else if (phaseFuncName == DISTANCE || phaseFuncName == INVERSE_DISTANCE || phaseFuncName == SCALED_DISTANCE || phaseFuncName == SCALED_INVERSE_DISTANCE || phaseFuncName == SCALED_INVERSE_SHIFTED_DISTANCE || phaseFuncName == SCALED_INVERSE_SHIFTED_WEIGHTED_DISTANCE) { qreal dist = 0; if (phaseFuncName == SCALED_INVERSE_SHIFTED_DISTANCE) { for (int r=0; r<numRegs; r+=2) { qreal dif = (phaseInds[r*stride+offset] - phaseInds[(r+1)*stride+offset] - params[2+r/2]); dist += dif*dif; } } else if (phaseFuncName == SCALED_INVERSE_SHIFTED_WEIGHTED_DISTANCE) { for (int r=0; r<numRegs; r+=2) { qreal dif = (phaseInds[r*stride+offset] - phaseInds[(r+1)*stride+offset] - params[2+r+1]); dist += params[2+r] * dif*dif; } } else for (int r=0; r<numRegs; r+=2) { qreal dif = (phaseInds[(r+1)*stride+offset] - phaseInds[r*stride+offset]); dist += dif*dif; } // if sqrt() arg would be negative, set it to divergence param if (dist < 0) dist = 0; dist = sqrt(dist); if (phaseFuncName == DISTANCE) phase = dist; else if (phaseFuncName == INVERSE_DISTANCE) phase = (dist == 0.)? 
params[0] : 1/dist; // smallest non-zero dist is 1 else if (phaseFuncName == SCALED_DISTANCE) phase = params[0] * dist; else if (phaseFuncName == SCALED_INVERSE_DISTANCE || phaseFuncName == SCALED_INVERSE_SHIFTED_DISTANCE || phaseFuncName == SCALED_INVERSE_SHIFTED_WEIGHTED_DISTANCE) phase = (dist <= REAL_EPS)? params[1] : params[0] / dist; // unless shifted closer } } // negate phase to conjugate operator if (conj) phase *= -1; // modify amp to amp * exp(i phase) qreal c = cos(phase); qreal s = sin(phase); qreal re = qureg.deviceStateVec.real[index]; qreal im = qureg.deviceStateVec.imag[index]; // = {re[amp] cos(phase) - im[amp] sin(phase)} + i {re[amp] sin(phase) + im[amp] cos(phase)} qureg.deviceStateVec.real[index] = re*c - im*s; qureg.deviceStateVec.imag[index] = re*s + im*c; } void statevec_applyParamNamedPhaseFuncOverrides( Qureg qureg, int* qubits, int* numQubitsPerReg, int numRegs, enum bitEncoding encoding, enum phaseFunc phaseFuncName, qreal* params, int numParams, long long int* overrideInds, qreal* overridePhases, int numOverrides, int conj ) { // determine size of arrays, for cloning into GPU memory size_t mem_numQubitsPerReg = numRegs * sizeof *numQubitsPerReg; size_t mem_overridePhases = numOverrides * sizeof *overridePhases; size_t mem_overrideInds = numOverrides * numRegs * sizeof *overrideInds; size_t mem_params = numParams * sizeof *params; size_t mem_qubits = 0; for (int r=0; r<numRegs; r++) mem_qubits += numQubitsPerReg[r] * sizeof *qubits; // allocate global GPU memory int* d_qubits; cudaMalloc(&d_qubits, mem_qubits); int* d_numQubitsPerReg; cudaMalloc(&d_numQubitsPerReg, mem_numQubitsPerReg); long long int* d_overrideInds; cudaMalloc(&d_overrideInds, mem_overrideInds); qreal* d_overridePhases; cudaMalloc(&d_overridePhases, mem_overridePhases); qreal* d_params = NULL; if (numParams > 0) cudaMalloc(&d_params, mem_params); // copy function args into GPU memory cudaMemcpy(d_qubits, qubits, mem_qubits, cudaMemcpyHostToDevice); cudaMemcpy(d_numQubitsPerReg, numQubitsPerReg, mem_numQubitsPerReg, cudaMemcpyHostToDevice); cudaMemcpy(d_overrideInds, overrideInds, mem_overrideInds, cudaMemcpyHostToDevice); cudaMemcpy(d_overridePhases, overridePhases, mem_overridePhases, cudaMemcpyHostToDevice); if (numParams > 0) cudaMemcpy(d_params, params, mem_params, cudaMemcpyHostToDevice); int threadsPerCUDABlock = 128; int CUDABlocks = ceil((qreal) qureg.numAmpsPerChunk / threadsPerCUDABlock); // allocate thread-local working space {phaseInds} long long int *d_phaseInds; size_t gridSize = (size_t) threadsPerCUDABlock * CUDABlocks; cudaMalloc(&d_phaseInds, numRegs*gridSize * sizeof *d_phaseInds); // call kernel statevec_applyParamNamedPhaseFuncOverridesKernel<<<CUDABlocks,threadsPerCUDABlock>>>( qureg, d_qubits, d_numQubitsPerReg, numRegs, encoding, phaseFuncName, d_params, numParams, d_overrideInds, d_overridePhases, numOverrides, d_phaseInds, conj); // free device memory cudaFree(d_qubits); cudaFree(d_numQubitsPerReg); cudaFree(d_overrideInds); cudaFree(d_overridePhases); cudaFree(d_phaseInds); if (numParams > 0) cudaFree(d_params); } __global__ void densmatr_setQuregToPauliHamilKernel( Qureg qureg, enum pauliOpType* pauliCodes, qreal* termCoeffs, int numSumTerms ) { long long int n = blockIdx.x*blockDim.x + threadIdx.x; if (n>=qureg.numAmpsPerChunk) return; // flattened {I,X,Y,Z} matrix elements, where [k] = [p][i][j] const int pauliRealElems[] = { 1,0, 0,1, 0,1, 1,0, 0,0, 0,0, 1,0, 0,-1 }; const int pauliImagElems[] = { 0,0, 0,0, 0,0, 0,0, 0,-1,1,0, 0,0, 0,0 }; // |n> = |c>|r> const 
int numQubits = qureg.numQubitsRepresented; const long long int r = n & ((1LL << numQubits) - 1); const long long int c = n >> numQubits; // new amplitude of |n> qreal elemRe = 0; qreal elemIm = 0; for (long long int t=0; t<numSumTerms; t++) { // pauliKronecker[r][c] = prod_q Pauli[q][q-th bit of r and c] int kronRe = 1; int kronIm = 0; long long int pInd = t * numQubits; for (int q=0; q<numQubits; q++) { // get element of Pauli matrix int i = (r >> q) & 1; int j = (c >> q) & 1; int p = (int) pauliCodes[pInd++]; int k = (p<<2) + (i<<1) + j; int pauliRe = pauliRealElems[k]; int pauliIm = pauliImagElems[k]; // kron *= pauli int tmp = (pauliRe*kronRe) - (pauliIm*kronIm); kronIm = (pauliRe*kronIm) + (pauliIm*kronRe); kronRe = tmp; } // elem = sum_t coeffs[t] pauliKronecker[r][c] elemRe += termCoeffs[t] * kronRe; elemIm += termCoeffs[t] * kronIm; } // overwrite the density matrix entry qureg.deviceStateVec.real[n] = elemRe; qureg.deviceStateVec.imag[n] = elemIm; } void densmatr_setQuregToPauliHamil(Qureg qureg, PauliHamil hamil) { // copy hamil into GPU memory enum pauliOpType* d_pauliCodes; size_t mem_pauliCodes = hamil.numSumTerms * hamil.numQubits * sizeof *d_pauliCodes; cudaMalloc(&d_pauliCodes, mem_pauliCodes); cudaMemcpy(d_pauliCodes, hamil.pauliCodes, mem_pauliCodes, cudaMemcpyHostToDevice); qreal* d_termCoeffs; size_t mem_termCoeffs = hamil.numSumTerms * sizeof *d_termCoeffs; cudaMalloc(&d_termCoeffs, mem_termCoeffs); cudaMemcpy(d_termCoeffs, hamil.termCoeffs, mem_termCoeffs, cudaMemcpyHostToDevice); int numThreadsPerBlock = 128; int numBlocks = ceil(qureg.numAmpsPerChunk / (qreal) numThreadsPerBlock); densmatr_setQuregToPauliHamilKernel<<<numBlocks, numThreadsPerBlock>>>( qureg, d_pauliCodes, d_termCoeffs, hamil.numSumTerms); // free tmp GPU memory cudaFree(d_pauliCodes); cudaFree(d_termCoeffs); } void seedQuEST(QuESTEnv *env, unsigned long int *seedArray, int numSeeds) { // free existing seed array, if exists if (env->seeds != NULL) free(env->seeds); // record keys in permanent heap env->seeds = (unsigned long int*) malloc(numSeeds * sizeof *(env->seeds)); for (int i=0; i<numSeeds; i++) (env->seeds)[i] = seedArray[i]; env->numSeeds = numSeeds; // pass keys to Mersenne Twister seeder init_by_array(seedArray, numSeeds); } #ifdef __cplusplus } #endif
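/*
 * Editor's sketch (not part of QuEST): the calc* routines above all repeat the same two-level
 * reduction boilerplate flagged by their @TODO comments -- launch a block-level shared-memory
 * reduction, then rerun it on the per-block partial sums while ping-ponging between
 * firstLevelReduction and secondLevelReduction. The standalone CUDA example below shows that
 * pattern in isolation. DEMO_REDUCE_SIZE, demoSumReduceKernel and demoReduceToScalar are
 * hypothetical names used only for this sketch; d_scratch must hold at least
 * ceil(numValues/DEMO_REDUCE_SIZE) doubles, and both buffers are overwritten.
 */
#include <cuda_runtime.h>

#define DEMO_REDUCE_SIZE 128   // stands in for REDUCE_SHARED_SIZE; must be a power of two

__global__ void demoSumReduceKernel(const double* in, double* out, long long int numValues) {
    extern __shared__ double temp[];
    long long int gid = blockIdx.x*blockDim.x + threadIdx.x;
    temp[threadIdx.x] = (gid < numValues) ? in[gid] : 0.0;   // pad the final block with zeros
    __syncthreads();

    // tree reduction within the block, playing the role of reduceBlock() above
    for (int stride = blockDim.x/2; stride > 0; stride >>= 1) {
        if (threadIdx.x < stride)
            temp[threadIdx.x] += temp[threadIdx.x + stride];
        __syncthreads();
    }
    if (threadIdx.x == 0)
        out[blockIdx.x] = temp[0];   // one partial sum per block
}

double demoReduceToScalar(double* d_in, double* d_scratch, long long int numValues) {
    while (numValues > 1) {
        long long int numBlocks = (numValues + DEMO_REDUCE_SIZE - 1) / DEMO_REDUCE_SIZE;
        demoSumReduceKernel<<<(int) numBlocks, DEMO_REDUCE_SIZE,
                              DEMO_REDUCE_SIZE*sizeof(double)>>>(d_in, d_scratch, numValues);
        cudaDeviceSynchronize();
        double* tmp = d_in; d_in = d_scratch; d_scratch = tmp;   // same role as swapDouble()
        numValues = numBlocks;                                   // next pass reduces the partial sums
    }
    double result;
    cudaMemcpy(&result, d_in, sizeof(double), cudaMemcpyDeviceToHost);
    return result;
}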
dfc76c9e154c9ba893ec3a9d2537065574fee7f8.hip
// !!! This is a file automatically generated by hipify!!! #include <vector> #include "caffe/layers/base_data_layer.hpp" namespace caffe { template <typename Dtype> void BasePrefetchingDataLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { Batch<Dtype>* batch = prefetch_full_.pop("Data layer prefetch queue empty"); // Reshape to loaded data. top[0]->ReshapeLike(batch->data_); // Copy the data caffe_copy(batch->data_.count(), batch->data_.gpu_data(), top[0]->mutable_gpu_data()); if (this->output_labels_) { // Reshape to loaded labels. top[1]->ReshapeLike(batch->label_); // Copy the labels. caffe_copy(batch->label_.count(), batch->label_.gpu_data(), top[1]->mutable_gpu_data()); } // Ensure the copy is synchronous wrt the host, so that the next batch isn't // copied in meanwhile. CUDA_CHECK(hipStreamSynchronize(hipStreamDefault)); prefetch_free_.push(batch); } // Fisher ************************************ > template <typename Dtype> void BasePrefetchingLabelmapDataLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { LabelmapBatch<Dtype>* batch = prefetch_full_.pop("Data layer prefetch queue empty"); // Reshape to loaded data. top[0]->ReshapeLike(batch->data_); // Copy the data caffe_copy(batch->data_.count(), batch->data_.gpu_data(), top[0]->mutable_gpu_data()); top[1]->ReshapeLike(batch->labelmap_); // Copy the labels. caffe_copy(batch->labelmap_.count(), batch->labelmap_.gpu_data(), top[1]->mutable_gpu_data()); // Ensure the copy is synchronous wrt the host, so that the next batch isn't // copied in meanwhile. CUDA_CHECK(hipStreamSynchronize(hipStreamDefault)); prefetch_free_.push(batch); } // < ************************************ INSTANTIATE_LAYER_GPU_FORWARD(BasePrefetchingDataLayer); // Fisher ************************************ > INSTANTIATE_LAYER_GPU_FORWARD(BasePrefetchingLabelmapDataLayer); // < ************************************ } // namespace caffe
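/*
 * Editor's note (illustrative, not part of the generated file): apart from the added header
 * comment, the only API rewrite hipify made in this translation unit is
 * cudaStreamSynchronize(cudaStreamDefault) -> hipStreamSynchronize(hipStreamDefault), once in
 * each Forward_gpu definition; the Caffe-level logic (prefetch queues, ReshapeLike, caffe_copy)
 * is untouched. A hand-written alternative would be a small portability macro like the sketch
 * below, where USE_HIP and GPU_STREAM_SYNC are hypothetical names, and it is assumed that
 * Caffe's CUDA_CHECK macro accepts the HIP error code in a HIP build (not shown in this fork).
 */
#ifdef USE_HIP
  #include <hip/hip_runtime.h>
  #define GPU_STREAM_SYNC() CUDA_CHECK(hipStreamSynchronize(hipStreamDefault))
#else
  #include <cuda_runtime.h>
  #define GPU_STREAM_SYNC() CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault))
#endif
// Forward_gpu() would then call GPU_STREAM_SYNC() instead of hard-coding either runtime,
// which is what the hipify pass automates when it emits the .hip file above.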
dfc76c9e154c9ba893ec3a9d2537065574fee7f8.cu
#include <vector> #include "caffe/layers/base_data_layer.hpp" namespace caffe { template <typename Dtype> void BasePrefetchingDataLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { Batch<Dtype>* batch = prefetch_full_.pop("Data layer prefetch queue empty"); // Reshape to loaded data. top[0]->ReshapeLike(batch->data_); // Copy the data caffe_copy(batch->data_.count(), batch->data_.gpu_data(), top[0]->mutable_gpu_data()); if (this->output_labels_) { // Reshape to loaded labels. top[1]->ReshapeLike(batch->label_); // Copy the labels. caffe_copy(batch->label_.count(), batch->label_.gpu_data(), top[1]->mutable_gpu_data()); } // Ensure the copy is synchronous wrt the host, so that the next batch isn't // copied in meanwhile. CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault)); prefetch_free_.push(batch); } // Fisher ************************************ > template <typename Dtype> void BasePrefetchingLabelmapDataLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { LabelmapBatch<Dtype>* batch = prefetch_full_.pop("Data layer prefetch queue empty"); // Reshape to loaded data. top[0]->ReshapeLike(batch->data_); // Copy the data caffe_copy(batch->data_.count(), batch->data_.gpu_data(), top[0]->mutable_gpu_data()); top[1]->ReshapeLike(batch->labelmap_); // Copy the labels. caffe_copy(batch->labelmap_.count(), batch->labelmap_.gpu_data(), top[1]->mutable_gpu_data()); // Ensure the copy is synchronous wrt the host, so that the next batch isn't // copied in meanwhile. CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault)); prefetch_free_.push(batch); } // < ************************************ INSTANTIATE_LAYER_GPU_FORWARD(BasePrefetchingDataLayer); // Fisher ************************************ > INSTANTIATE_LAYER_GPU_FORWARD(BasePrefetchingLabelmapDataLayer); // < ************************************ } // namespace caffe
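/*
 * Editor's sketch (standalone CUDA, not Caffe code): the comment in Forward_gpu() above --
 * "Ensure the copy is synchronous wrt the host, so that the next batch isn't copied in
 * meanwhile" -- guards against recycling a prefetch buffer while a device copy is still in
 * flight. The toy function below shows the same hazard and fix in isolation; recycleAfterCopy,
 * d_top and h_batch are made-up names, and h_batch is assumed to be pinned so the copy is
 * genuinely asynchronous with respect to the host.
 */
#include <cuda_runtime.h>

void recycleAfterCopy(float* d_top, float* h_batch, size_t count, cudaStream_t stream) {
    // enqueue the host-to-device copy of the current batch (returns before the copy finishes)
    cudaMemcpyAsync(d_top, h_batch, count * sizeof(float), cudaMemcpyHostToDevice, stream);

    // block until the copy has drained before handing h_batch back to the prefetch thread;
    // this mirrors CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault)) in Forward_gpu() above
    cudaStreamSynchronize(stream);

    // ...only now is it safe to push the batch back onto prefetch_free_ and refill h_batch...
}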
cef78fdbd1bb88a53196dde39fbac9a531b51a77.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include <math.h> #include <sys/time.h> const int PARTITION_SIZE = 32; #define AT(mtx, width, row, column) \ mtx[(row) * (width) + (column)] inline double nowSec() { struct timeval t; struct timezone tzp; gettimeofday(&t, &tzp); return t.tv_sec + t.tv_usec*1e-6; } __global__ void global_mmul (int *A, int *B, int *C, int N, int Ndev, int dev) { int NperDev = N/Ndev; int i = NperDev*(1+dev) -1 - (blockIdx.y * blockDim.y + threadIdx.y); int j = blockIdx.x * blockDim.x + threadIdx.x; int iAC = i % NperDev; int i_part = i % PARTITION_SIZE; int j_part = j % PARTITION_SIZE; int rowPerPart = N/PARTITION_SIZE; __shared__ int Apart[PARTITION_SIZE][PARTITION_SIZE]; __shared__ int Bpart[PARTITION_SIZE][PARTITION_SIZE]; AT(C, N, iAC, j) = 0; for (int n = 0; n < rowPerPart; n++) { Apart[i_part][j_part] = AT(A, N, iAC, n*PARTITION_SIZE + j_part); Bpart[i_part][j_part] = AT(B, N, n*PARTITION_SIZE + i_part, j); __syncthreads(); for (int k=0; k<PARTITION_SIZE; k++) AT(C, N, iAC, j) += Apart[i_part][k]*Bpart[k][j_part]; } } #ifdef PRINT void printMtx (int **m, int N, int width, int height) { for (int i=0; i<N; i++) { for (int j=0; j<N; j++) { if (width > height) printf("%d\t", AT(m[i/height], width, i%height, j)); else printf("%d\t", AT(m[j/width], width, i, j%width)); } puts("\n"); } } #endif int main(int argc, char **argv) { if (argc != 2) { puts("Usage: Matrix_mult [N]\n"); return -1; } int N=atoi(argv[1]); int nDevices; hipGetDeviceCount(&nDevices); if (N % (PARTITION_SIZE*nDevices)) { printf ("error: N must be a multiple of PARTITION_SIZE*nDevices=%d\n", PARTITION_SIZE*nDevices); return -1; } unsigned NN=N*N; unsigned NNperDevice = NN/nDevices; unsigned NperDevice = N/nDevices; int Nblocks = N/PARTITION_SIZE; int NblocksPerDevice =Nblocks/nDevices; int **A_d, **B_d, **C_d; int **A_h, *B_h, **C_h; A_d = (int**)malloc(nDevices * sizeof(int*)); B_d = (int**)malloc(nDevices * sizeof(int*)); C_d = (int**)malloc(nDevices * sizeof(int*)); A_h = (int**)malloc(nDevices * sizeof(int*)); C_h = (int**)malloc(nDevices * sizeof(int*)); B_h = (int*)malloc(sizeof(int)*NN); for (int i=0; i<nDevices; i++) { A_h[i] = (int*)malloc(sizeof(int)*NNperDevice); C_h[i] = (int*)malloc(sizeof(int)*NNperDevice); hipSetDevice(i); hipMalloc(&A_d[i], sizeof(int)*NNperDevice); hipMalloc(&C_d[i], sizeof(int)*NNperDevice); hipMalloc(&B_d[i], sizeof(int)*NN); } hipDeviceSynchronize(); for (int i=0; i<N; i++) { for (int j=0; j<N; j++) { AT(A_h[i/NperDevice], N, i%NperDevice, j) = ((i == j) ? 
1 : 0); AT(B_h, N, i, j) = i*N+j; } } for (int i=0; i<nDevices; i++) { hipSetDevice(i); hipMemcpy(B_d[i], B_h, NN*sizeof(int),hipMemcpyHostToDevice); hipMemcpy(A_d[i], A_h[i], NNperDevice*sizeof(int),hipMemcpyHostToDevice); hipMemcpy(C_d[i], C_h[i], NNperDevice*sizeof(int),hipMemcpyHostToDevice); } hipDeviceSynchronize(); dim3 blockPerGrid(Nblocks,NblocksPerDevice); dim3 threadPerBlock(PARTITION_SIZE,PARTITION_SIZE); double t_begin = nowSec(); for (int i=0; i<nDevices; i++) { hipSetDevice(i); hipLaunchKernelGGL(( global_mmul) , dim3(blockPerGrid), dim3(threadPerBlock) , 0, 0, A_d[i],B_d[i],C_d[i],N,nDevices,i); } hipDeviceSynchronize(); double t_end = nowSec(); for (int i=0; i<nDevices; i++) { hipSetDevice(i); hipMemcpy(C_h[i], C_d[i], NNperDevice*sizeof(int),hipMemcpyDeviceToHost); } hipDeviceSynchronize(); #ifdef PRINT fprintf(stderr,"A=\n"); printMtx(A_h, N, N, NperDevice); fprintf(stderr,"\n\nB=\n"); printMtx(&B_h, N, N, N); fprintf(stderr,"\n\nC=\n"); printMtx(C_h, N, N, NperDevice); fprintf(stderr,"\n"); #endif printf("Elapsed time: %f sec\n", t_end - t_begin); for (int i=0; i<nDevices; i++) { free(A_h[i]); free(C_h[i]); hipFree(A_d[i]); hipFree(B_d[i]); hipFree(C_d[i]); } free(A_h); free(A_d); free(B_h); free(B_d); free(C_h); free(C_d); return 0; }
cef78fdbd1bb88a53196dde39fbac9a531b51a77.cu
#include <cuda.h> #include <stdio.h> #include <math.h> #include <sys/time.h> const int PARTITION_SIZE = 32; #define AT(mtx, width, row, column) \ mtx[(row) * (width) + (column)] inline double nowSec() { struct timeval t; struct timezone tzp; gettimeofday(&t, &tzp); return t.tv_sec + t.tv_usec*1e-6; } __global__ void global_mmul (int *A, int *B, int *C, int N, int Ndev, int dev) { int NperDev = N/Ndev; int i = NperDev*(1+dev) -1 - (blockIdx.y * blockDim.y + threadIdx.y); int j = blockIdx.x * blockDim.x + threadIdx.x; int iAC = i % NperDev; int i_part = i % PARTITION_SIZE; int j_part = j % PARTITION_SIZE; int rowPerPart = N/PARTITION_SIZE; __shared__ int Apart[PARTITION_SIZE][PARTITION_SIZE]; __shared__ int Bpart[PARTITION_SIZE][PARTITION_SIZE]; AT(C, N, iAC, j) = 0; for (int n = 0; n < rowPerPart; n++) { Apart[i_part][j_part] = AT(A, N, iAC, n*PARTITION_SIZE + j_part); Bpart[i_part][j_part] = AT(B, N, n*PARTITION_SIZE + i_part, j); __syncthreads(); for (int k=0; k<PARTITION_SIZE; k++) AT(C, N, iAC, j) += Apart[i_part][k]*Bpart[k][j_part]; } } #ifdef PRINT void printMtx (int **m, int N, int width, int height) { for (int i=0; i<N; i++) { for (int j=0; j<N; j++) { if (width > height) printf("%d\t", AT(m[i/height], width, i%height, j)); else printf("%d\t", AT(m[j/width], width, i, j%width)); } puts("\n"); } } #endif int main(int argc, char **argv) { if (argc != 2) { puts("Usage: Matrix_mult [N]\n"); return -1; } int N=atoi(argv[1]); int nDevices; cudaGetDeviceCount(&nDevices); if (N % (PARTITION_SIZE*nDevices)) { printf ("error: N must be a multiple of PARTITION_SIZE*nDevices=%d\n", PARTITION_SIZE*nDevices); return -1; } unsigned NN=N*N; unsigned NNperDevice = NN/nDevices; unsigned NperDevice = N/nDevices; int Nblocks = N/PARTITION_SIZE; int NblocksPerDevice =Nblocks/nDevices; int **A_d, **B_d, **C_d; int **A_h, *B_h, **C_h; A_d = (int**)malloc(nDevices * sizeof(int*)); B_d = (int**)malloc(nDevices * sizeof(int*)); C_d = (int**)malloc(nDevices * sizeof(int*)); A_h = (int**)malloc(nDevices * sizeof(int*)); C_h = (int**)malloc(nDevices * sizeof(int*)); B_h = (int*)malloc(sizeof(int)*NN); for (int i=0; i<nDevices; i++) { A_h[i] = (int*)malloc(sizeof(int)*NNperDevice); C_h[i] = (int*)malloc(sizeof(int)*NNperDevice); cudaSetDevice(i); cudaMalloc(&A_d[i], sizeof(int)*NNperDevice); cudaMalloc(&C_d[i], sizeof(int)*NNperDevice); cudaMalloc(&B_d[i], sizeof(int)*NN); } cudaDeviceSynchronize(); for (int i=0; i<N; i++) { for (int j=0; j<N; j++) { AT(A_h[i/NperDevice], N, i%NperDevice, j) = ((i == j) ? 
1 : 0); AT(B_h, N, i, j) = i*N+j; } } for (int i=0; i<nDevices; i++) { cudaSetDevice(i); cudaMemcpy(B_d[i], B_h, NN*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(A_d[i], A_h[i], NNperDevice*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(C_d[i], C_h[i], NNperDevice*sizeof(int),cudaMemcpyHostToDevice); } cudaDeviceSynchronize(); dim3 blockPerGrid(Nblocks,NblocksPerDevice); dim3 threadPerBlock(PARTITION_SIZE,PARTITION_SIZE); double t_begin = nowSec(); for (int i=0; i<nDevices; i++) { cudaSetDevice(i); global_mmul <<< blockPerGrid, threadPerBlock >>> (A_d[i],B_d[i],C_d[i],N,nDevices,i); } cudaDeviceSynchronize(); double t_end = nowSec(); for (int i=0; i<nDevices; i++) { cudaSetDevice(i); cudaMemcpy(C_h[i], C_d[i], NNperDevice*sizeof(int),cudaMemcpyDeviceToHost); } cudaDeviceSynchronize(); #ifdef PRINT fprintf(stderr,"A=\n"); printMtx(A_h, N, N, NperDevice); fprintf(stderr,"\n\nB=\n"); printMtx(&B_h, N, N, N); fprintf(stderr,"\n\nC=\n"); printMtx(C_h, N, N, NperDevice); fprintf(stderr,"\n"); #endif printf("Elapsed time: %f sec\n", t_end - t_begin); for (int i=0; i<nDevices; i++) { free(A_h[i]); free(C_h[i]); cudaFree(A_d[i]); cudaFree(B_d[i]); cudaFree(C_d[i]); } free(A_h); free(A_d); free(B_h); free(B_d); free(C_h); free(C_d); return 0; }
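// Illustrative single-GPU sketch, not taken from the multi-device file pair above.
// global_mmul stages PARTITION_SIZE x PARTITION_SIZE tiles of A and B in shared
// memory; the canonical form of that tiling is shown below, with barriers on both
// sides of the inner product and the partial sum kept in a register instead of
// being re-written to C. It assumes N is a multiple of TILE and one TILE x TILE
// thread block per output tile.
#define TILE 32

__global__ void tiled_mmul_sketch(const int* A, const int* B, int* C, int N) {
  __shared__ int As[TILE][TILE];
  __shared__ int Bs[TILE][TILE];

  int row = blockIdx.y * TILE + threadIdx.y;
  int col = blockIdx.x * TILE + threadIdx.x;
  int acc = 0;

  for (int t = 0; t < N / TILE; ++t) {
    As[threadIdx.y][threadIdx.x] = A[row * N + t * TILE + threadIdx.x];
    Bs[threadIdx.y][threadIdx.x] = B[(t * TILE + threadIdx.y) * N + col];
    __syncthreads();   // tiles fully loaded before any thread reads them

    for (int k = 0; k < TILE; ++k)
      acc += As[threadIdx.y][k] * Bs[k][threadIdx.x];
    __syncthreads();   // all reads finished before the next iteration overwrites the tiles
  }
  C[row * N + col] = acc;
}
// Launch sketch: tiled_mmul_sketch<<<dim3(N/TILE, N/TILE), dim3(TILE, TILE)>>>(A, B, C, N);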
bdc37cacf18158fe0a68723840b0ef2381af9080.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuml/tree/flatnode.h> #include <cuml/tree/decisiontree.hpp> #include "decisiontree_impl_hip.cuh" namespace ML { namespace DecisionTree { void set_tree_params(DecisionTreeParams &params, int cfg_max_depth, int cfg_max_leaves, float cfg_max_features, int cfg_n_bins, int cfg_split_algo, int cfg_min_rows_per_node, float cfg_min_impurity_decrease, bool cfg_bootstrap_features, CRITERION cfg_split_criterion, bool cfg_quantile_per_tree, bool cfg_shuffle_features) { params.max_depth = cfg_max_depth; params.max_leaves = cfg_max_leaves; params.max_features = cfg_max_features; params.n_bins = cfg_n_bins; params.split_algo = cfg_split_algo; params.min_rows_per_node = cfg_min_rows_per_node; params.bootstrap_features = cfg_bootstrap_features; params.split_criterion = cfg_split_criterion; params.quantile_per_tree = cfg_quantile_per_tree; params.shuffle_features = cfg_shuffle_features; params.min_impurity_decrease = cfg_min_impurity_decrease; } void validity_check(const DecisionTreeParams params) { ASSERT((params.max_depth >= 0), "Invalid max depth %d", params.max_depth); ASSERT((params.max_leaves == -1) || (params.max_leaves > 0), "Invalid max leaves %d", params.max_leaves); ASSERT((params.max_features > 0) && (params.max_features <= 1.0), "max_features value %f outside permitted (0, 1] range", params.max_features); ASSERT((params.n_bins > 0), "Invalid n_bins %d", params.n_bins); ASSERT((params.split_algo >= 0) && (params.split_algo < SPLIT_ALGO::SPLIT_ALGO_END), "split_algo value %d outside permitted [0, %d) range", params.split_algo, SPLIT_ALGO::SPLIT_ALGO_END); ASSERT((params.min_rows_per_node >= 2), "Invalid min # rows per node value %d. 
Should be >= 2.", params.min_rows_per_node); } void print(const DecisionTreeParams params) { CUML_LOG_DEBUG("max_depth: %d", params.max_depth); CUML_LOG_DEBUG("max_leaves: %d", params.max_leaves); CUML_LOG_DEBUG("max_features: %f", params.max_features); CUML_LOG_DEBUG("n_bins: %d", params.n_bins); CUML_LOG_DEBUG("split_algo: %d", params.split_algo); CUML_LOG_DEBUG("min_rows_per_node: %d", params.min_rows_per_node); CUML_LOG_DEBUG("bootstrap_features: %d", params.bootstrap_features); CUML_LOG_DEBUG("split_criterion: %d", params.split_criterion); CUML_LOG_DEBUG("quantile_per_tree: %d", params.quantile_per_tree); CUML_LOG_DEBUG("shuffle_features: %d", params.shuffle_features); } template <class T, class L> void print_tree_summary(const TreeMetaDataNode<T, L> *tree) { CUML_LOG_INFO(" Decision Tree depth --> %d and n_leaves --> %d", tree->depth_counter, tree->leaf_counter); CUML_LOG_INFO(" Tree Fitting - Overall time --> %lf s", tree->prepare_time + tree->train_time); CUML_LOG_INFO(" - preparing for fit time: %lf s", tree->prepare_time); CUML_LOG_INFO(" - tree growing time: %lf s", tree->train_time); } template <class T, class L> void print_tree(const TreeMetaDataNode<T, L> *tree) { print_tree_summary<T, L>(tree); print_node<T, L>("", tree->sparsetree, 0, false); } void decisionTreeClassifierFit(const ML::cumlHandle &handle, TreeClassifierF *&tree, float *data, const int ncols, const int nrows, int *labels, unsigned int *rowids, const int n_sampled_rows, int unique_labels, DecisionTree::DecisionTreeParams tree_params) { std::shared_ptr<DecisionTreeClassifier<float>> dt_classifier = std::make_shared<DecisionTreeClassifier<float>>(); dt_classifier->fit(handle, data, ncols, nrows, labels, rowids, n_sampled_rows, unique_labels, tree, tree_params); } void decisionTreeClassifierFit(const ML::cumlHandle &handle, TreeClassifierD *&tree, double *data, const int ncols, const int nrows, int *labels, unsigned int *rowids, const int n_sampled_rows, int unique_labels, DecisionTree::DecisionTreeParams tree_params) { std::shared_ptr<DecisionTreeClassifier<double>> dt_classifier = std::make_shared<DecisionTreeClassifier<double>>(); dt_classifier->fit(handle, data, ncols, nrows, labels, rowids, n_sampled_rows, unique_labels, tree, tree_params); } void decisionTreeClassifierPredict(const ML::cumlHandle &handle, const TreeClassifierF *tree, const float *rows, const int n_rows, const int n_cols, int *predictions, int verbosity) { std::shared_ptr<DecisionTreeClassifier<float>> dt_classifier = std::make_shared<DecisionTreeClassifier<float>>(); dt_classifier->predict(handle, tree, rows, n_rows, n_cols, predictions, verbosity); } void decisionTreeClassifierPredict(const ML::cumlHandle &handle, const TreeClassifierD *tree, const double *rows, const int n_rows, const int n_cols, int *predictions, int verbosity) { std::shared_ptr<DecisionTreeClassifier<double>> dt_classifier = std::make_shared<DecisionTreeClassifier<double>>(); dt_classifier->predict(handle, tree, rows, n_rows, n_cols, predictions, verbosity); } // ----------------------------- Regression ----------------------------------- // void decisionTreeRegressorFit(const ML::cumlHandle &handle, TreeRegressorF *&tree, float *data, const int ncols, const int nrows, float *labels, unsigned int *rowids, const int n_sampled_rows, DecisionTree::DecisionTreeParams tree_params) { std::shared_ptr<DecisionTreeRegressor<float>> dt_regressor = std::make_shared<DecisionTreeRegressor<float>>(); dt_regressor->fit(handle, data, ncols, nrows, labels, rowids, n_sampled_rows, tree, 
tree_params); } void decisionTreeRegressorFit(const ML::cumlHandle &handle, TreeRegressorD *&tree, double *data, const int ncols, const int nrows, double *labels, unsigned int *rowids, const int n_sampled_rows, DecisionTree::DecisionTreeParams tree_params) { std::shared_ptr<DecisionTreeRegressor<double>> dt_regressor = std::make_shared<DecisionTreeRegressor<double>>(); dt_regressor->fit(handle, data, ncols, nrows, labels, rowids, n_sampled_rows, tree, tree_params); } void decisionTreeRegressorPredict(const ML::cumlHandle &handle, const TreeRegressorF *tree, const float *rows, const int n_rows, const int n_cols, float *predictions, int verbosity) { std::shared_ptr<DecisionTreeRegressor<float>> dt_regressor = std::make_shared<DecisionTreeRegressor<float>>(); dt_regressor->predict(handle, tree, rows, n_rows, n_cols, predictions, verbosity); } void decisionTreeRegressorPredict(const ML::cumlHandle &handle, const TreeRegressorD *tree, const double *rows, const int n_rows, const int n_cols, double *predictions, int verbosity) { std::shared_ptr<DecisionTreeRegressor<double>> dt_regressor = std::make_shared<DecisionTreeRegressor<double>>(); dt_regressor->predict(handle, tree, rows, n_rows, n_cols, predictions, verbosity); } // Functions' specializations template void print_tree_summary<float, int>(const TreeClassifierF *tree); template void print_tree_summary<double, int>(const TreeClassifierD *tree); template void print_tree_summary<float, float>(const TreeRegressorF *tree); template void print_tree_summary<double, double>(const TreeRegressorD *tree); template void print_tree<float, int>(const TreeClassifierF *tree); template void print_tree<double, int>(const TreeClassifierD *tree); template void print_tree<float, float>(const TreeRegressorF *tree); template void print_tree<double, double>(const TreeRegressorD *tree); } // End namespace DecisionTree } //End namespace ML
bdc37cacf18158fe0a68723840b0ef2381af9080.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuml/tree/flatnode.h> #include <cuml/tree/decisiontree.hpp> #include "decisiontree_impl.cuh" namespace ML { namespace DecisionTree { void set_tree_params(DecisionTreeParams &params, int cfg_max_depth, int cfg_max_leaves, float cfg_max_features, int cfg_n_bins, int cfg_split_algo, int cfg_min_rows_per_node, float cfg_min_impurity_decrease, bool cfg_bootstrap_features, CRITERION cfg_split_criterion, bool cfg_quantile_per_tree, bool cfg_shuffle_features) { params.max_depth = cfg_max_depth; params.max_leaves = cfg_max_leaves; params.max_features = cfg_max_features; params.n_bins = cfg_n_bins; params.split_algo = cfg_split_algo; params.min_rows_per_node = cfg_min_rows_per_node; params.bootstrap_features = cfg_bootstrap_features; params.split_criterion = cfg_split_criterion; params.quantile_per_tree = cfg_quantile_per_tree; params.shuffle_features = cfg_shuffle_features; params.min_impurity_decrease = cfg_min_impurity_decrease; } void validity_check(const DecisionTreeParams params) { ASSERT((params.max_depth >= 0), "Invalid max depth %d", params.max_depth); ASSERT((params.max_leaves == -1) || (params.max_leaves > 0), "Invalid max leaves %d", params.max_leaves); ASSERT((params.max_features > 0) && (params.max_features <= 1.0), "max_features value %f outside permitted (0, 1] range", params.max_features); ASSERT((params.n_bins > 0), "Invalid n_bins %d", params.n_bins); ASSERT((params.split_algo >= 0) && (params.split_algo < SPLIT_ALGO::SPLIT_ALGO_END), "split_algo value %d outside permitted [0, %d) range", params.split_algo, SPLIT_ALGO::SPLIT_ALGO_END); ASSERT((params.min_rows_per_node >= 2), "Invalid min # rows per node value %d. 
Should be >= 2.", params.min_rows_per_node); } void print(const DecisionTreeParams params) { CUML_LOG_DEBUG("max_depth: %d", params.max_depth); CUML_LOG_DEBUG("max_leaves: %d", params.max_leaves); CUML_LOG_DEBUG("max_features: %f", params.max_features); CUML_LOG_DEBUG("n_bins: %d", params.n_bins); CUML_LOG_DEBUG("split_algo: %d", params.split_algo); CUML_LOG_DEBUG("min_rows_per_node: %d", params.min_rows_per_node); CUML_LOG_DEBUG("bootstrap_features: %d", params.bootstrap_features); CUML_LOG_DEBUG("split_criterion: %d", params.split_criterion); CUML_LOG_DEBUG("quantile_per_tree: %d", params.quantile_per_tree); CUML_LOG_DEBUG("shuffle_features: %d", params.shuffle_features); } template <class T, class L> void print_tree_summary(const TreeMetaDataNode<T, L> *tree) { CUML_LOG_INFO(" Decision Tree depth --> %d and n_leaves --> %d", tree->depth_counter, tree->leaf_counter); CUML_LOG_INFO(" Tree Fitting - Overall time --> %lf s", tree->prepare_time + tree->train_time); CUML_LOG_INFO(" - preparing for fit time: %lf s", tree->prepare_time); CUML_LOG_INFO(" - tree growing time: %lf s", tree->train_time); } template <class T, class L> void print_tree(const TreeMetaDataNode<T, L> *tree) { print_tree_summary<T, L>(tree); print_node<T, L>("", tree->sparsetree, 0, false); } void decisionTreeClassifierFit(const ML::cumlHandle &handle, TreeClassifierF *&tree, float *data, const int ncols, const int nrows, int *labels, unsigned int *rowids, const int n_sampled_rows, int unique_labels, DecisionTree::DecisionTreeParams tree_params) { std::shared_ptr<DecisionTreeClassifier<float>> dt_classifier = std::make_shared<DecisionTreeClassifier<float>>(); dt_classifier->fit(handle, data, ncols, nrows, labels, rowids, n_sampled_rows, unique_labels, tree, tree_params); } void decisionTreeClassifierFit(const ML::cumlHandle &handle, TreeClassifierD *&tree, double *data, const int ncols, const int nrows, int *labels, unsigned int *rowids, const int n_sampled_rows, int unique_labels, DecisionTree::DecisionTreeParams tree_params) { std::shared_ptr<DecisionTreeClassifier<double>> dt_classifier = std::make_shared<DecisionTreeClassifier<double>>(); dt_classifier->fit(handle, data, ncols, nrows, labels, rowids, n_sampled_rows, unique_labels, tree, tree_params); } void decisionTreeClassifierPredict(const ML::cumlHandle &handle, const TreeClassifierF *tree, const float *rows, const int n_rows, const int n_cols, int *predictions, int verbosity) { std::shared_ptr<DecisionTreeClassifier<float>> dt_classifier = std::make_shared<DecisionTreeClassifier<float>>(); dt_classifier->predict(handle, tree, rows, n_rows, n_cols, predictions, verbosity); } void decisionTreeClassifierPredict(const ML::cumlHandle &handle, const TreeClassifierD *tree, const double *rows, const int n_rows, const int n_cols, int *predictions, int verbosity) { std::shared_ptr<DecisionTreeClassifier<double>> dt_classifier = std::make_shared<DecisionTreeClassifier<double>>(); dt_classifier->predict(handle, tree, rows, n_rows, n_cols, predictions, verbosity); } // ----------------------------- Regression ----------------------------------- // void decisionTreeRegressorFit(const ML::cumlHandle &handle, TreeRegressorF *&tree, float *data, const int ncols, const int nrows, float *labels, unsigned int *rowids, const int n_sampled_rows, DecisionTree::DecisionTreeParams tree_params) { std::shared_ptr<DecisionTreeRegressor<float>> dt_regressor = std::make_shared<DecisionTreeRegressor<float>>(); dt_regressor->fit(handle, data, ncols, nrows, labels, rowids, n_sampled_rows, tree, 
tree_params); } void decisionTreeRegressorFit(const ML::cumlHandle &handle, TreeRegressorD *&tree, double *data, const int ncols, const int nrows, double *labels, unsigned int *rowids, const int n_sampled_rows, DecisionTree::DecisionTreeParams tree_params) { std::shared_ptr<DecisionTreeRegressor<double>> dt_regressor = std::make_shared<DecisionTreeRegressor<double>>(); dt_regressor->fit(handle, data, ncols, nrows, labels, rowids, n_sampled_rows, tree, tree_params); } void decisionTreeRegressorPredict(const ML::cumlHandle &handle, const TreeRegressorF *tree, const float *rows, const int n_rows, const int n_cols, float *predictions, int verbosity) { std::shared_ptr<DecisionTreeRegressor<float>> dt_regressor = std::make_shared<DecisionTreeRegressor<float>>(); dt_regressor->predict(handle, tree, rows, n_rows, n_cols, predictions, verbosity); } void decisionTreeRegressorPredict(const ML::cumlHandle &handle, const TreeRegressorD *tree, const double *rows, const int n_rows, const int n_cols, double *predictions, int verbosity) { std::shared_ptr<DecisionTreeRegressor<double>> dt_regressor = std::make_shared<DecisionTreeRegressor<double>>(); dt_regressor->predict(handle, tree, rows, n_rows, n_cols, predictions, verbosity); } // Functions' specializations template void print_tree_summary<float, int>(const TreeClassifierF *tree); template void print_tree_summary<double, int>(const TreeClassifierD *tree); template void print_tree_summary<float, float>(const TreeRegressorF *tree); template void print_tree_summary<double, double>(const TreeRegressorD *tree); template void print_tree<float, int>(const TreeClassifierF *tree); template void print_tree<double, int>(const TreeClassifierD *tree); template void print_tree<float, float>(const TreeRegressorF *tree); template void print_tree<double, double>(const TreeRegressorD *tree); } // End namespace DecisionTree } //End namespace ML
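// Illustrative sketch, independent of the cuML sources above. The file ends with
// explicit instantiations such as "template void print_tree<float, int>(...)" so
// the templated printers are compiled once inside the .cu translation unit. The
// toy example below shows the same explicit-instantiation pattern on a made-up
// report<T, L> function; the names here are illustrative and not cuML API.
#include <cstdio>

template <class T, class L>
void report(T value, L label) {
  printf("value=%f label=%d\n", static_cast<double>(value), static_cast<int>(label));
}

// Only these concrete versions are emitted into the object file; callers in other
// translation units can link against them without ever seeing the definition.
template void report<float, int>(float, int);
template void report<double, int>(double, int);

int main() {
  report(1.5f, 3);   // uses the float/int instantiation
  report(2.5, 4);    // uses the double/int instantiation
  return 0;
}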
9064af1dded994b85a20d5e12378b9fee0f4bd79.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> using namespace std; #include "GPU.h" int GPU::CoresPerSM(int major, int minor){ int cores = 0; switch (major){ case 2: // Fermi if (minor == 1) cores = 48; else cores = 32; break; case 3: // Kepler cores = 192; break; case 5: // Maxwell cores = 128; break; case 6: // Pascal if ((minor == 1) || (minor == 2)) cores = 128; else if (minor == 0) cores = 64; else printf("Unknown device type\n"); break; case 7: // Volta and Turing if ((minor == 0) || (minor == 5)) cores = 64; else printf("Unknown device type\n"); break; case 8: // Ampere if (minor == 0) cores = 64; else if (minor == 6) cores = 128; else printf("Unknown device type\n"); break; default: printf("Unknown device type\n"); break; } return cores; } void GPU::info(){ int nDevices; hipGetDeviceCount(&nDevices); for (int i = 0; i < nDevices; i++) { hipDeviceProp_t prop; hipGetDeviceProperties(&prop, i); hipSharedMemConfig pConfig; hipDeviceGetSharedMemConfig (&pConfig ); cout << "Device Number: " << i << endl; cout << " Device name: " << prop.name << endl; cout << " Compute capability: " << prop.major << "." << prop.minor << endl; int cs = CoresPerSM(prop.major, prop.minor); cout << " (SMs, Cuda cores/SM, Cuda cores): (" << prop.multiProcessorCount; cout << ", " << cs << ", " << cs* prop.multiProcessorCount << ")\n" << endl; cout << " Warp Size: " << prop.warpSize << endl; cout << " Maximum number of threads per block: " << prop.maxThreadsPerBlock << endl; cout << " Maximum size of a block: (" << prop.maxThreadsDim[0] << ", "; cout << prop.maxThreadsDim[1] << ", " << prop.maxThreadsDim[2] << ")" << endl; cout << " Maximum size of a grid: (" << prop.maxGridSize[0] << ", "; cout << prop.maxGridSize[1] << ", " << prop.maxGridSize[2] << ")" << endl; cout << " Maximum number of threads per SM: " << prop.maxThreadsPerMultiProcessor << endl; cout << " Maximum number of warps per SM: " << prop.maxThreadsPerMultiProcessor/prop.warpSize << '\n' << endl; cout << " Shared memory available per multiprocess: " << prop.sharedMemPerMultiprocessor/1024.0 << " kb" << endl; cout << " Shared memory available per block: " << prop.sharedMemPerBlock/1024.0 << " kb" << endl; cout << " Shared memory bank width: "; switch(pConfig){ case 0: cout << "default\n" << endl; break; case 1: cout << "4 Byte\n" << endl; break; case 2: cout << "8 Byte\n" << endl; break; default: cout << "Unknown\n" << endl; break; } cout << " Total number of registers available per block: " << prop.regsPerBlock << endl; cout << " Total number of registers available per SM: " << prop.regsPerMultiprocessor << endl; cout << " Total memory: " << prop.totalGlobalMem/1073741824.0 << " Gb" << endl; cout << " Peak Memory Bandwidth " << 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8.0)/1.0e6 << " Gb/s" << endl; cout << "\nCUDA:\n" << endl; int dv, rv; hipDriverGetVersion(&dv); hipRuntimeGetVersion(&rv); cout << " CUDA Driver Version: " << dv/1000 << ", " << (dv%100/10) << endl; cout << " CUDA Runtime Version: " << rv/1000 << ", " << (rv%100/10) << endl; } }
9064af1dded994b85a20d5e12378b9fee0f4bd79.cu
#include <iostream> using namespace std; #include "GPU.h" int GPU::CoresPerSM(int major, int minor){ int cores = 0; switch (major){ case 2: // Fermi if (minor == 1) cores = 48; else cores = 32; break; case 3: // Kepler cores = 192; break; case 5: // Maxwell cores = 128; break; case 6: // Pascal if ((minor == 1) || (minor == 2)) cores = 128; else if (minor == 0) cores = 64; else printf("Unknown device type\n"); break; case 7: // Volta and Turing if ((minor == 0) || (minor == 5)) cores = 64; else printf("Unknown device type\n"); break; case 8: // Ampere if (minor == 0) cores = 64; else if (minor == 6) cores = 128; else printf("Unknown device type\n"); break; default: printf("Unknown device type\n"); break; } return cores; } void GPU::info(){ int nDevices; cudaGetDeviceCount(&nDevices); for (int i = 0; i < nDevices; i++) { cudaDeviceProp prop; cudaGetDeviceProperties(&prop, i); cudaSharedMemConfig pConfig; cudaDeviceGetSharedMemConfig (&pConfig ); cout << "Device Number: " << i << endl; cout << " Device name: " << prop.name << endl; cout << " Compute capability: " << prop.major << "." << prop.minor << endl; int cs = CoresPerSM(prop.major, prop.minor); cout << " (SMs, Cuda cores/SM, Cuda cores): (" << prop.multiProcessorCount; cout << ", " << cs << ", " << cs* prop.multiProcessorCount << ")\n" << endl; cout << " Warp Size: " << prop.warpSize << endl; cout << " Maximum number of threads per block: " << prop.maxThreadsPerBlock << endl; cout << " Maximum size of a block: (" << prop.maxThreadsDim[0] << ", "; cout << prop.maxThreadsDim[1] << ", " << prop.maxThreadsDim[2] << ")" << endl; cout << " Maximum size of a grid: (" << prop.maxGridSize[0] << ", "; cout << prop.maxGridSize[1] << ", " << prop.maxGridSize[2] << ")" << endl; cout << " Maximum number of threads per SM: " << prop.maxThreadsPerMultiProcessor << endl; cout << " Maximum number of warps per SM: " << prop.maxThreadsPerMultiProcessor/prop.warpSize << '\n' << endl; cout << " Shared memory available per multiprocess: " << prop.sharedMemPerMultiprocessor/1024.0 << " kb" << endl; cout << " Shared memory available per block: " << prop.sharedMemPerBlock/1024.0 << " kb" << endl; cout << " Shared memory bank width: "; switch(pConfig){ case 0: cout << "default\n" << endl; break; case 1: cout << "4 Byte\n" << endl; break; case 2: cout << "8 Byte\n" << endl; break; default: cout << "Unknown\n" << endl; break; } cout << " Total number of registers available per block: " << prop.regsPerBlock << endl; cout << " Total number of registers available per SM: " << prop.regsPerMultiprocessor << endl; cout << " Total memory: " << prop.totalGlobalMem/1073741824.0 << " Gb" << endl; cout << " Peak Memory Bandwidth " << 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8.0)/1.0e6 << " Gb/s" << endl; cout << "\nCUDA:\n" << endl; int dv, rv; cudaDriverGetVersion(&dv); cudaRuntimeGetVersion(&rv); cout << " CUDA Driver Version: " << dv/1000 << ", " << (dv%100/10) << endl; cout << " CUDA Runtime Version: " << rv/1000 << ", " << (rv%100/10) << endl; } }
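// Illustrative sketch, not part of GPU.h / GPU.cu above. GPU::info() queries
// cudaGetDeviceProperties and prints the packed driver/runtime version integers.
// The minimal program below shows the conventional decoding of those packed
// values (1000*major + 10*minor) together with the per-device fields that the
// CoresPerSM lookup depends on.
#include <cuda_runtime.h>
#include <cstdio>

int main() {
  int dv = 0, rv = 0;
  cudaDriverGetVersion(&dv);
  cudaRuntimeGetVersion(&rv);
  printf("Driver  %d.%d\n", dv / 1000, (dv % 1000) / 10);
  printf("Runtime %d.%d\n", rv / 1000, (rv % 1000) / 10);

  int n = 0;
  cudaGetDeviceCount(&n);
  for (int i = 0; i < n; ++i) {
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, i);
    printf("Device %d: %s, compute capability %d.%d, %d SMs\n",
           i, prop.name, prop.major, prop.minor, prop.multiProcessorCount);
  }
  return 0;
}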
e94978b5a9408fc06bbe5575bbb9a40ec2aa5247.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <vector> #include <memory> #include <limits.h> #include <stdio.h> #include <math.h> #include <math_constants.h> // CUDART_NAN /** \file acopredictor_ant_cycle_cuda.cu */ #include "cuda_device_utilities.cuh" #include "acopredictor_ant_cycle_cuda.cuh" #include "acopredictor.h" #define THREADS_PER_BLOCK 128 using std::cout; using std::cerr; using std::vector; using std::string; using std::unique_ptr; /** \{ */ /** Relative direction constants stored in GPU memory. */ __device__ const char UP = 0; //__device__ const char DOWN = 1; __device__ const char LEFT = 2; __device__ const char RIGHT = 3; __device__ const char FRONT = 4; /** \}*/ __device__ double &CUDAThread::pheromone(int i, int d){ return pheromones[i*5 + d]; } __device__ int3 CUDAThread::DIRECTION_VECTOR(int3 prevDir, char dir){ if(dir == FRONT){ return prevDir; } // Will be either 1 or -1 int sign = prevDir.x + prevDir.y + prevDir.z; struct { char x, z; } isZero = { .x = (prevDir.x == 0), .z = (prevDir.z == 0), }; int3 retval = {0,0,0}; if(dir == RIGHT){ retval.x = sign * isZero.x; retval.y = sign * !isZero.x; } else if(dir == LEFT){ retval.x = -sign * isZero.x; retval.y = -sign * !isZero.x; } else if(dir == UP){ retval.z = 1 * isZero.z; retval.y = 1 * !isZero.z; } else /* if(dir == DOWN) */ { retval.z = -1 * isZero.z; retval.y = -1 * !isZero.z; } return retval; } __device__ int CUDAThread::calculate_contacts(int3 *solution){ int nContacts = 0; // Check if solution is invalidated if(solution[0].x == -1) return -1; for(int i = 0; i < nCoords; i++){ if(hpChain[i] == 'P') continue; for(int j = i+1; j < nCoords; j++){ int norm = norm1(solution[i] - solution[j]); if(norm == 0){ // Invalidate solution solution[0].x = -1; return -1; } else if(hpChain[j] == 'H' && norm == 1){ nContacts++; } } } return nContacts; } __device__ void CUDAThread::get_heuristics(int curSize, int3 *solution, double *heurs, int3 *possiblePos){ heurs[0] = 0; heurs[1] = 0; heurs[2] = 0; heurs[3] = 0; heurs[4] = 0; char horp = this->hpChain[curSize]; int contacts[5] = { 0, 0, 0, 0, 0 }; int collisions[5] = { 0, 0, 0, 0, 0 }; // Get number of contacts per possible position // Here we assume bead is H for(int i = 0; i < 5; i++){ int3 nextPos = possiblePos[i]; for(int j = 0; j < curSize; j++){ int norm = norm1(nextPos - solution[j]); if(norm == 0){ collisions[i]++; } else if(norm == 1 && this->hpChain[j] == 'H'){ contacts[i] += 1; } } } // If bead is P, we disregard the 'contacts' vector if(horp == 'P'){ for(int i = 0; i < 5; i++){ if(collisions[i] == 0) heurs[i] = 1.0; else heurs[i] = 0.0; } } else { for(int i = 0; i < 5; i++){ if(collisions[i] == 0) heurs[i] = 1.0 + contacts[i]; else heurs[i] = 0.0; } } } __device__ void CUDAThread::get_probabilities(int movIndex, double *probs, double *heurs){ probs[0] = 0.2; probs[1] = 0.2; probs[2] = 0.2; probs[3] = 0.2; probs[4] = 0.2; double sum = 0; for(int d = 0; d < 5; d++){ double A = powf(pheromone(movIndex, d), dAlpha); double B = powf(heurs[d], dBeta); double aux = A * B; sum += aux; probs[d] = aux; } // If sum is 0, would give us division by 0 if(sum == 0){ probs[0] = 0.2; probs[1] = 0.2; probs[2] = 0.2; probs[3] = 0.2; probs[4] = 0.2; return; } // sum should not be inf or nan. The user must control this. 
if(sum < -1E9 || sum > 1E9 || sum == CUDART_NAN){ printf("ERROR: Encountered unexpected 'Not a Number' or 'Inf'.\n" "Please control the ACO_ALPHA and ACO_BETA parameters more suitably.\n" "Keep in mind that the base for ACO_BETA may be higher than 0, " "and the base for ACO_ALPHA may be very near 0.\n"); } for(int d = 0; d < 5; d++){ probs[d] /= sum; } } __device__ void CUDAThread::develop_solution(int3 *solution, char *directions){ for(int i = 0; i < nMovElems; i++){ int3 prevDir = solution[i+1] - solution[i]; int3 prevBead = solution[i+1]; /* print(prevDir); print(prevBead); printf("\n"); */ int3 possiblePos[5] = { prevBead + DIRECTION_VECTOR(prevDir, 0), prevBead + DIRECTION_VECTOR(prevDir, 1), prevBead + DIRECTION_VECTOR(prevDir, 2), prevBead + DIRECTION_VECTOR(prevDir, 3), prevBead + DIRECTION_VECTOR(prevDir, 4) }; /* print(possiblePos[0]); print(possiblePos[1]); print(possiblePos[2]); print(possiblePos[3]); print(possiblePos[4]); printf("\n"); */ double heurs[5]; this->get_heuristics(i, solution, heurs, possiblePos); // If all heuristics are 0, there is no possible next direction to take. double sum = heurs[0] + heurs[1] + heurs[2] + heurs[3] + heurs[4]; if(sum == 0) solution[0].x = -1; // Signalizes error double probs[5]; this->get_probabilities(i, probs, heurs); // Accumulate the probability vector for(int j = 1; j < 5; j++) probs[j] += probs[j-1]; // Decide direction char direction = 0; // Get number within 0-1 double unifRand = randomize_d(randNumber); for(int j = 0; j < 5; j++){ if(unifRand < probs[j]){ direction = j; break; } } /* printf("Decided: %d: ", direction); print(possiblePos[direction]); printf("\n"); */ // Must be offset by 2, cuz the first 2 are (0,0,0) and (1,0,0) and we disconsider them solution[i+2] = possiblePos[direction]; directions[i] = direction; } } __device__ void CUDAThread::solution_from_directions(int3 *solution, char *directions){ solution[0] = {0,0,0}; solution[1] = {1,0,0}; for(int i = 0; i < nMovElems; i++){ int3 prevDirection = solution[i+1] - solution[i]; int3 backBead = solution[i+1]; solution[i+2] = backBead + DIRECTION_VECTOR(prevDirection, directions[i]); } } __device__ void CUDAThread::local_search(int &solContact, int lsFreq){ // Copy solution for(int i = 0; i < nCoords; i++) myOtherDirections[i] = myDirections[i]; /* DEBUG LOCAL SEARCH if(this->tid == 5){ printf("Solution: "); for(int i = 0; i < nCoords; i++){ print(mySolution[i]); printf(" "); } printf("\n"); } */ for(int i = 0; i < lsFreq; i++){ int idx = randomize_d(randNumber) * nCoords; /* DEBUG LOCAL SEARCH if(this->tid == 5){ printf("idx: %d ", idx); } */ char direction = randomize_d(randNumber) * 5; myOtherDirections[idx] = direction; /* DEBUG LOCAL SEARCH if(this->tid == 5){ printf("olddir: %d dir: %d\n", myDirections[idx], direction); } */ solution_from_directions(myOtherSolution, myOtherDirections); int contacts = calculate_contacts(myOtherSolution); /* DEBUG LOCAL SEARCH if(this->tid == 5){ printf("Generated: "); for(int j = 0; j < nCoords; j++){ print(myOtherSolution[j]); printf(" "); } printf("\n"); } */ // Check if is better if(contacts > solContact){ // Update contacts solContact = contacts; // Update directions myDirections[idx] = myOtherDirections[idx]; // Update solution for(int j = 0; j < nCoords; j++) mySolution[j] = myOtherSolution[j]; } } } __global__ void HostToDevice::ant_develop_solution( double *pheromone, int nMovElems, int3 *solutions, int3 *moreSolutions, int nCoords, char *relDirections, char *moreRelDirections, int *contacts, char *hpChain, int lsFreq, double 
alpha, double beta ){ // Begin by handling shared memory extern __shared__ char shMem[]; for(int i = threadIdx.x; i < nCoords; i += blockDim.x){ shMem[i] = hpChain[i]; } __syncthreads(); CUDAThread *self = new CUDAThread(); self->tid = blockIdx.x * blockDim.x + threadIdx.x; self->randNumber = (13235632^(threadIdx.x*threadIdx.x+77))>>(threadIdx.x%13); // Get pointer to our data self->mySolution = solutions + nCoords*self->tid; self->myOtherSolution = moreSolutions + nCoords*self->tid; self->myDirections = relDirections + nMovElems*self->tid; self->myOtherDirections = moreRelDirections + nMovElems*self->tid; self->pheromones = pheromone; self->hpChain = (char*) shMem; self->nCoords = nCoords; self->nMovElems = nMovElems; self->dAlpha = alpha; self->dBeta = beta; self->develop_solution(self->mySolution, self->myDirections); // Now we calculate contacts // If collisions are found, the solution is invalidated (and contacts = -1). int nContacts = self->calculate_contacts(self->mySolution); // Then we perform local search self->local_search(nContacts, lsFreq); contacts[self->tid] = nContacts; /* DEBUG CONTACTS if(self->tid == 0){ for(int i = 0; i < gridDim.x * blockDim.x; i++){ printf("%d: %d\n", i, contacts[i]); } } */ delete self; /* DEBUG PROTEINS PRODUCED for(int i = 0; i < gridDim.x * blockDim.x; i++){ if(tid == i){ int3 *mySolution = get_solution(solutions, tid, nCoords); for(int j = 0; j < nMovElems+2; j++){ print(mySolution[j]); printf(" "); } printf("\n"); } __syncthreads(); } */ /* DEBUG PROGRESS for(int i = 0; i < 10; i++) printf("%d ", progress[i]); printf("\n"); */ /* DEBUG SOLUTION VECTORS for(int i = 0; i < 10; i++){ int3 a = solutions[nCoords*i]; int3 b = solutions[nCoords*i+1]; printf("(%d, %d, %d), (%d, %d, %d)\n", a.x, a.y, a.z, b.x, b.y, b.z); } */ /* DEBUG PHEROMONES for(int j = 0; j < 5; j++){ for(int i = 0; i < nMovElems; i++){ printf("%lf ", get_pheromone(pheromone, i, j)); } printf("\n"); } */ } __global__ void HostToDevice::find_best_solution( int *contacts, int nContacts, char *directions, int nMovElems, char *outDirections, int *outBestContact ){ // Index of best solutions __shared__ int shContacts[1024]; __shared__ int shIndex[1024]; int tid = threadIdx.x; int stride = blockDim.x; /* DEBUG FIND BEST if(tid == 0){ printf("Contacts: "); for(int i = 0; i < nContacts; i++) printf("%d ", contacts[i]); printf("\n"); }*/ int maxContacts = -1; int maxIndex = -1; for(int i = tid; i < nContacts; i += stride){ if(contacts[i] > maxContacts){ maxContacts = contacts[i]; maxIndex = i; } } shContacts[tid] = maxContacts; shIndex[tid] = maxIndex; __syncthreads(); // Reduce within shared memory for(int power = 512; power > 0; power /= 2){ if(tid < power){ if(shContacts[tid] < shContacts[tid+power]){ shContacts[tid] = shContacts[tid+power]; shIndex[tid] = shIndex[tid+power]; } } __syncthreads(); } /* DEBUG FIND BEST if(tid == 0){ printf("Best contacts: %d ", shContacts[0]); printf("Best index: %d\n", shIndex[0]); } */ // copy best directions into out buffer char *bestDir = directions + shIndex[0] * nMovElems; for(int i = tid; i < nMovElems; i += stride){ outDirections[i] = bestDir[i]; } // Copy contacts into out buffer too if(tid == 0) *outBestContact = shContacts[0]; } __global__ void HostToDevice::evaporate_pheromones(double *pheromones, int nMovElems, double evapRate){ const int nPheromones = nMovElems * 5; int tid = blockIdx.x*blockDim.x + threadIdx.x; int stride = gridDim.x*blockDim.x; for(int i = tid; i < nPheromones; i += stride) pheromones[i] *= (1 - evapRate); } __global__ void 
HostToDevice::deposit_pheromones( double *pheromones, int nMovElems, char *directions, int *contacts, int hCount ){ int tid = blockIdx.x * blockDim.x + threadIdx.x; char *myDirections = directions + nMovElems*tid; double pheroAmount = contacts[tid] / hCount; /* DEBUG DEPOSIT if(tid == 5){ for(int i = 0; i < 5; i++){ for(int j = 0; j < nMovElems; j++){ printf("%.3lf ", pheromones[j*5 + i]); } printf("\n"); } } */ for(int i = 0; i < nMovElems; i++){ int d = myDirections[i]; double *pheroPos = pheromones + (i*5 + d); atomicAdd_d(pheroPos, pheroAmount); } /* DEBUG DEPOSIT if(tid == 5){ for(int i = 0; i < 5; i++){ for(int j = 0; j < nMovElems; j++){ printf("%.3lf ", pheromones[j*5 + i]); } printf("\n"); } } */ } void ACOPredictor::perform_cycle(vector<ACOSolution> &antsSolutions, int *nContacts){ /* Data we need in the GPU: * - pheromone matrix * - Solutions initialized with (0,0,0) (1,0,0), and with enough space for N coordinates total * - Vectors with relative directions adopted for each solution * - Vector for tracking progress for each solution * - Vector of 5 possible next positions, for each solution * - Vector of contact count, for each solution. We sinalize lost proteins with negative contacts. * - Vector of HP chain * - A second vector of solutions, in which threads can hold "tentative" solutions. * To sinalize error in solutions, we will set the first coordinate to (-1,0,0) */ int nCoords = dNMovElems + 2; string hpChain = dHPChain.get_chain(); const int antsPerBlock = THREADS_PER_BLOCK; const int nBlocks = (dNAnts + antsPerBlock - 1) / antsPerBlock; const int totalAnts = antsPerBlock * nBlocks; // This is >= dNAnts // Allocation. We allocate more space just so that the extra threads don't cause segfaults ACODeviceData d = { .pheromone = dNMovElems*5, .solutions = nCoords*totalAnts, .moreSolutions = nCoords*totalAnts, .relDirections = dNMovElems*totalAnts, .moreRelDirections = dNMovElems*totalAnts, .contacts = totalAnts, .bestContact = 1, .hpChain = hpChain.length() }; // Copying d.pheromone.memcpyAsync(dPheromone); d.hpChain.memcpyAsync(hpChain.c_str()); int3 fillData[2] = {{0,0,0},{1,0,0}}; for(int i = 0; i < dNAnts; i++) hipMemcpyAsync(d.solutions.get() + i*nCoords, fillData, sizeof(int3)*2, hipMemcpyHostToDevice); int shMemBytes = 0; shMemBytes += hpChain.length() * sizeof(char); // Let GPU develop solutions // Here each ant develops a solution hipLaunchKernelGGL(( HostToDevice::ant_develop_solution), dim3(nBlocks),dim3(antsPerBlock),shMemBytes, 0, d.pheromone, dNMovElems, d.solutions, d.moreSolutions, nCoords, d.relDirections, d.moreRelDirections, d.contacts, d.hpChain, dLSFreq, dAlpha, dBeta); // We copy best solution into first solution d.moreRelDirections hipLaunchKernelGGL(( HostToDevice::find_best_solution), dim3(1),dim3(1024), 0, 0, d.contacts, dNAnts, d.relDirections, dNMovElems, d.moreRelDirections, d.bestContact); hipLaunchKernelGGL(( HostToDevice::evaporate_pheromones), dim3(1),dim3(1024), 0, 0, d.pheromone, dNMovElems, dEvap); hipLaunchKernelGGL(( HostToDevice::deposit_pheromones), dim3(nBlocks),dim3(antsPerBlock), 0, 0, d.pheromone, dNMovElems, d.relDirections, d.contacts, dHCount); // Fetching. 
We have to update the pheromones and best solutions, if needed int bestContact; char *bestDir = new char[dNMovElems]; d.pheromone.copyTo(dPheromone); d.bestContact.copyTo(&bestContact); d.moreRelDirections.copyTo(bestDir, dNMovElems); /* DEBUG Fetching cout << "Best: " << bestContact << "\n"; for(int i = 0; i < dNMovElems; i++){ cout << (int) bestDir[i] << " "; } cout << "\n"; */ if(bestContact > dBestContacts){ dBestContacts = bestContact; vector<char> vecDir(bestDir, bestDir + dNMovElems); dBestSol = ACOSolution(vecDir); } delete[] bestDir; }
e94978b5a9408fc06bbe5575bbb9a40ec2aa5247.cu
#include <iostream> #include <vector> #include <memory> #include <limits.h> #include <stdio.h> #include <math.h> #include <math_constants.h> // CUDART_NAN /** \file acopredictor_ant_cycle_cuda.cu */ #include "cuda_device_utilities.cuh" #include "acopredictor_ant_cycle_cuda.cuh" #include "acopredictor.h" #define THREADS_PER_BLOCK 128 using std::cout; using std::cerr; using std::vector; using std::string; using std::unique_ptr; /** \{ */ /** Relative direction constants stored in GPU memory. */ __device__ const char UP = 0; //__device__ const char DOWN = 1; __device__ const char LEFT = 2; __device__ const char RIGHT = 3; __device__ const char FRONT = 4; /** \}*/ __device__ double &CUDAThread::pheromone(int i, int d){ return pheromones[i*5 + d]; } __device__ int3 CUDAThread::DIRECTION_VECTOR(int3 prevDir, char dir){ if(dir == FRONT){ return prevDir; } // Will be either 1 or -1 int sign = prevDir.x + prevDir.y + prevDir.z; struct { char x, z; } isZero = { .x = (prevDir.x == 0), .z = (prevDir.z == 0), }; int3 retval = {0,0,0}; if(dir == RIGHT){ retval.x = sign * isZero.x; retval.y = sign * !isZero.x; } else if(dir == LEFT){ retval.x = -sign * isZero.x; retval.y = -sign * !isZero.x; } else if(dir == UP){ retval.z = 1 * isZero.z; retval.y = 1 * !isZero.z; } else /* if(dir == DOWN) */ { retval.z = -1 * isZero.z; retval.y = -1 * !isZero.z; } return retval; } __device__ int CUDAThread::calculate_contacts(int3 *solution){ int nContacts = 0; // Check if solution is invalidated if(solution[0].x == -1) return -1; for(int i = 0; i < nCoords; i++){ if(hpChain[i] == 'P') continue; for(int j = i+1; j < nCoords; j++){ int norm = norm1(solution[i] - solution[j]); if(norm == 0){ // Invalidate solution solution[0].x = -1; return -1; } else if(hpChain[j] == 'H' && norm == 1){ nContacts++; } } } return nContacts; } __device__ void CUDAThread::get_heuristics(int curSize, int3 *solution, double *heurs, int3 *possiblePos){ heurs[0] = 0; heurs[1] = 0; heurs[2] = 0; heurs[3] = 0; heurs[4] = 0; char horp = this->hpChain[curSize]; int contacts[5] = { 0, 0, 0, 0, 0 }; int collisions[5] = { 0, 0, 0, 0, 0 }; // Get number of contacts per possible position // Here we assume bead is H for(int i = 0; i < 5; i++){ int3 nextPos = possiblePos[i]; for(int j = 0; j < curSize; j++){ int norm = norm1(nextPos - solution[j]); if(norm == 0){ collisions[i]++; } else if(norm == 1 && this->hpChain[j] == 'H'){ contacts[i] += 1; } } } // If bead is P, we disregard the 'contacts' vector if(horp == 'P'){ for(int i = 0; i < 5; i++){ if(collisions[i] == 0) heurs[i] = 1.0; else heurs[i] = 0.0; } } else { for(int i = 0; i < 5; i++){ if(collisions[i] == 0) heurs[i] = 1.0 + contacts[i]; else heurs[i] = 0.0; } } } __device__ void CUDAThread::get_probabilities(int movIndex, double *probs, double *heurs){ probs[0] = 0.2; probs[1] = 0.2; probs[2] = 0.2; probs[3] = 0.2; probs[4] = 0.2; double sum = 0; for(int d = 0; d < 5; d++){ double A = powf(pheromone(movIndex, d), dAlpha); double B = powf(heurs[d], dBeta); double aux = A * B; sum += aux; probs[d] = aux; } // If sum is 0, would give us division by 0 if(sum == 0){ probs[0] = 0.2; probs[1] = 0.2; probs[2] = 0.2; probs[3] = 0.2; probs[4] = 0.2; return; } // sum should not be inf or nan. The user must control this. 
if(sum < -1E9 || sum > 1E9 || sum == CUDART_NAN){ printf("ERROR: Encountered unexpected 'Not a Number' or 'Inf'.\n" "Please control the ACO_ALPHA and ACO_BETA parameters more suitably.\n" "Keep in mind that the base for ACO_BETA may be higher than 0, " "and the base for ACO_ALPHA may be very near 0.\n"); } for(int d = 0; d < 5; d++){ probs[d] /= sum; } } __device__ void CUDAThread::develop_solution(int3 *solution, char *directions){ for(int i = 0; i < nMovElems; i++){ int3 prevDir = solution[i+1] - solution[i]; int3 prevBead = solution[i+1]; /* print(prevDir); print(prevBead); printf("\n"); */ int3 possiblePos[5] = { prevBead + DIRECTION_VECTOR(prevDir, 0), prevBead + DIRECTION_VECTOR(prevDir, 1), prevBead + DIRECTION_VECTOR(prevDir, 2), prevBead + DIRECTION_VECTOR(prevDir, 3), prevBead + DIRECTION_VECTOR(prevDir, 4) }; /* print(possiblePos[0]); print(possiblePos[1]); print(possiblePos[2]); print(possiblePos[3]); print(possiblePos[4]); printf("\n"); */ double heurs[5]; this->get_heuristics(i, solution, heurs, possiblePos); // If all heuristics are 0, there is no possible next direction to take. double sum = heurs[0] + heurs[1] + heurs[2] + heurs[3] + heurs[4]; if(sum == 0) solution[0].x = -1; // Signalizes error double probs[5]; this->get_probabilities(i, probs, heurs); // Accumulate the probability vector for(int j = 1; j < 5; j++) probs[j] += probs[j-1]; // Decide direction char direction = 0; // Get number within 0-1 double unifRand = randomize_d(randNumber); for(int j = 0; j < 5; j++){ if(unifRand < probs[j]){ direction = j; break; } } /* printf("Decided: %d: ", direction); print(possiblePos[direction]); printf("\n"); */ // Must be offset by 2, cuz the first 2 are (0,0,0) and (1,0,0) and we disconsider them solution[i+2] = possiblePos[direction]; directions[i] = direction; } } __device__ void CUDAThread::solution_from_directions(int3 *solution, char *directions){ solution[0] = {0,0,0}; solution[1] = {1,0,0}; for(int i = 0; i < nMovElems; i++){ int3 prevDirection = solution[i+1] - solution[i]; int3 backBead = solution[i+1]; solution[i+2] = backBead + DIRECTION_VECTOR(prevDirection, directions[i]); } } __device__ void CUDAThread::local_search(int &solContact, int lsFreq){ // Copy solution for(int i = 0; i < nCoords; i++) myOtherDirections[i] = myDirections[i]; /* DEBUG LOCAL SEARCH if(this->tid == 5){ printf("Solution: "); for(int i = 0; i < nCoords; i++){ print(mySolution[i]); printf(" "); } printf("\n"); } */ for(int i = 0; i < lsFreq; i++){ int idx = randomize_d(randNumber) * nCoords; /* DEBUG LOCAL SEARCH if(this->tid == 5){ printf("idx: %d ", idx); } */ char direction = randomize_d(randNumber) * 5; myOtherDirections[idx] = direction; /* DEBUG LOCAL SEARCH if(this->tid == 5){ printf("olddir: %d dir: %d\n", myDirections[idx], direction); } */ solution_from_directions(myOtherSolution, myOtherDirections); int contacts = calculate_contacts(myOtherSolution); /* DEBUG LOCAL SEARCH if(this->tid == 5){ printf("Generated: "); for(int j = 0; j < nCoords; j++){ print(myOtherSolution[j]); printf(" "); } printf("\n"); } */ // Check if is better if(contacts > solContact){ // Update contacts solContact = contacts; // Update directions myDirections[idx] = myOtherDirections[idx]; // Update solution for(int j = 0; j < nCoords; j++) mySolution[j] = myOtherSolution[j]; } } } __global__ void HostToDevice::ant_develop_solution( double *pheromone, int nMovElems, int3 *solutions, int3 *moreSolutions, int nCoords, char *relDirections, char *moreRelDirections, int *contacts, char *hpChain, int lsFreq, double 
alpha, double beta ){ // Begin by handling shared memory extern __shared__ char shMem[]; for(int i = threadIdx.x; i < nCoords; i += blockDim.x){ shMem[i] = hpChain[i]; } __syncthreads(); CUDAThread *self = new CUDAThread(); self->tid = blockIdx.x * blockDim.x + threadIdx.x; self->randNumber = (13235632^(threadIdx.x*threadIdx.x+77))>>(threadIdx.x%13); // Get pointer to our data self->mySolution = solutions + nCoords*self->tid; self->myOtherSolution = moreSolutions + nCoords*self->tid; self->myDirections = relDirections + nMovElems*self->tid; self->myOtherDirections = moreRelDirections + nMovElems*self->tid; self->pheromones = pheromone; self->hpChain = (char*) shMem; self->nCoords = nCoords; self->nMovElems = nMovElems; self->dAlpha = alpha; self->dBeta = beta; self->develop_solution(self->mySolution, self->myDirections); // Now we calculate contacts // If collisions are found, the solution is invalidated (and contacts = -1). int nContacts = self->calculate_contacts(self->mySolution); // Then we perform local search self->local_search(nContacts, lsFreq); contacts[self->tid] = nContacts; /* DEBUG CONTACTS if(self->tid == 0){ for(int i = 0; i < gridDim.x * blockDim.x; i++){ printf("%d: %d\n", i, contacts[i]); } } */ delete self; /* DEBUG PROTEINS PRODUCED for(int i = 0; i < gridDim.x * blockDim.x; i++){ if(tid == i){ int3 *mySolution = get_solution(solutions, tid, nCoords); for(int j = 0; j < nMovElems+2; j++){ print(mySolution[j]); printf(" "); } printf("\n"); } __syncthreads(); } */ /* DEBUG PROGRESS for(int i = 0; i < 10; i++) printf("%d ", progress[i]); printf("\n"); */ /* DEBUG SOLUTION VECTORS for(int i = 0; i < 10; i++){ int3 a = solutions[nCoords*i]; int3 b = solutions[nCoords*i+1]; printf("(%d, %d, %d), (%d, %d, %d)\n", a.x, a.y, a.z, b.x, b.y, b.z); } */ /* DEBUG PHEROMONES for(int j = 0; j < 5; j++){ for(int i = 0; i < nMovElems; i++){ printf("%lf ", get_pheromone(pheromone, i, j)); } printf("\n"); } */ } __global__ void HostToDevice::find_best_solution( int *contacts, int nContacts, char *directions, int nMovElems, char *outDirections, int *outBestContact ){ // Index of best solutions __shared__ int shContacts[1024]; __shared__ int shIndex[1024]; int tid = threadIdx.x; int stride = blockDim.x; /* DEBUG FIND BEST if(tid == 0){ printf("Contacts: "); for(int i = 0; i < nContacts; i++) printf("%d ", contacts[i]); printf("\n"); }*/ int maxContacts = -1; int maxIndex = -1; for(int i = tid; i < nContacts; i += stride){ if(contacts[i] > maxContacts){ maxContacts = contacts[i]; maxIndex = i; } } shContacts[tid] = maxContacts; shIndex[tid] = maxIndex; __syncthreads(); // Reduce within shared memory for(int power = 512; power > 0; power /= 2){ if(tid < power){ if(shContacts[tid] < shContacts[tid+power]){ shContacts[tid] = shContacts[tid+power]; shIndex[tid] = shIndex[tid+power]; } } __syncthreads(); } /* DEBUG FIND BEST if(tid == 0){ printf("Best contacts: %d ", shContacts[0]); printf("Best index: %d\n", shIndex[0]); } */ // copy best directions into out buffer char *bestDir = directions + shIndex[0] * nMovElems; for(int i = tid; i < nMovElems; i += stride){ outDirections[i] = bestDir[i]; } // Copy contacts into out buffer too if(tid == 0) *outBestContact = shContacts[0]; } __global__ void HostToDevice::evaporate_pheromones(double *pheromones, int nMovElems, double evapRate){ const int nPheromones = nMovElems * 5; int tid = blockIdx.x*blockDim.x + threadIdx.x; int stride = gridDim.x*blockDim.x; for(int i = tid; i < nPheromones; i += stride) pheromones[i] *= (1 - evapRate); } __global__ void 
HostToDevice::deposit_pheromones(
    double *pheromones, int nMovElems, char *directions, int *contacts, int hCount
){
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    char *myDirections = directions + nMovElems*tid;

    // Cast before dividing: with two int operands the deposit amount would be
    // truncated (usually to zero) and effectively no pheromone would be laid down.
    double pheroAmount = contacts[tid] / (double) hCount;

    /* DEBUG DEPOSIT
    if(tid == 5){
        for(int i = 0; i < 5; i++){
            for(int j = 0; j < nMovElems; j++){
                printf("%.3lf ", pheromones[j*5 + i]);
            }
            printf("\n");
        }
    }
    */

    for(int i = 0; i < nMovElems; i++){
        int d = myDirections[i];
        double *pheroPos = pheromones + (i*5 + d);
        atomicAdd_d(pheroPos, pheroAmount);
    }

    /* DEBUG DEPOSIT
    if(tid == 5){
        for(int i = 0; i < 5; i++){
            for(int j = 0; j < nMovElems; j++){
                printf("%.3lf ", pheromones[j*5 + i]);
            }
            printf("\n");
        }
    }
    */
}

void ACOPredictor::perform_cycle(vector<ACOSolution> &antsSolutions, int *nContacts){
    /* Data we need in the GPU:
     * - pheromone matrix
     * - Solutions initialized with (0,0,0) (1,0,0), and with enough space for N coordinates total
     * - Vectors with relative directions adopted for each solution
     * - Vector for tracking progress for each solution
     * - Vector of 5 possible next positions, for each solution
     * - Vector of contact count, for each solution. We signal lost proteins with negative contacts.
     * - Vector of HP chain
     * - A second vector of solutions, in which threads can hold "tentative" solutions.
     * To signal an error in a solution, we set its first coordinate to (-1,0,0)
     */
    int nCoords = dNMovElems + 2;
    string hpChain = dHPChain.get_chain();

    const int antsPerBlock = THREADS_PER_BLOCK;
    const int nBlocks = (dNAnts + antsPerBlock - 1) / antsPerBlock;
    const int totalAnts = antsPerBlock * nBlocks; // This is >= dNAnts

    // Allocation. We allocate more space just so that the extra threads don't cause segfaults
    ACODeviceData d = {
        .pheromone         = dNMovElems*5,
        .solutions         = nCoords*totalAnts,
        .moreSolutions     = nCoords*totalAnts,
        .relDirections     = dNMovElems*totalAnts,
        .moreRelDirections = dNMovElems*totalAnts,
        .contacts          = totalAnts,
        .bestContact       = 1,
        .hpChain           = hpChain.length()
    };

    // Copying
    d.pheromone.memcpyAsync(dPheromone);
    d.hpChain.memcpyAsync(hpChain.c_str());
    int3 fillData[2] = {{0,0,0},{1,0,0}};
    for(int i = 0; i < dNAnts; i++)
        cudaMemcpyAsync(d.solutions.get() + i*nCoords, fillData, sizeof(int3)*2, cudaMemcpyHostToDevice);

    int shMemBytes = 0;
    shMemBytes += hpChain.length() * sizeof(char);

    // Let GPU develop solutions
    // Here each ant develops a solution
    HostToDevice::ant_develop_solution<<<nBlocks,antsPerBlock,shMemBytes>>>(
        d.pheromone, dNMovElems,
        d.solutions, d.moreSolutions, nCoords,
        d.relDirections, d.moreRelDirections,
        d.contacts, d.hpChain, dLSFreq, dAlpha, dBeta);

    // We copy the directions of the best solution into d.moreRelDirections
    HostToDevice::find_best_solution<<<1,1024>>>(
        d.contacts, dNAnts, d.relDirections, dNMovElems, d.moreRelDirections, d.bestContact);

    HostToDevice::evaporate_pheromones<<<1,1024>>>(d.pheromone, dNMovElems, dEvap);

    HostToDevice::deposit_pheromones<<<nBlocks,antsPerBlock>>>(
        d.pheromone, dNMovElems, d.relDirections, d.contacts, dHCount);

    // Fetching. We have to update the pheromones and best solutions, if needed
    int bestContact;
    char *bestDir = new char[dNMovElems];
    d.pheromone.copyTo(dPheromone);
    d.bestContact.copyTo(&bestContact);
    d.moreRelDirections.copyTo(bestDir, dNMovElems);

    /* DEBUG Fetching
    cout << "Best: " << bestContact << "\n";
    for(int i = 0; i < dNMovElems; i++){
        cout << (int) bestDir[i] << " ";
    }
    cout << "\n";
    */

    if(bestContact > dBestContacts){
        dBestContacts = bestContact;
        vector<char> vecDir(bestDir, bestDir + dNMovElems);
        dBestSol = ACOSolution(vecDir);
    }

    delete[] bestDir;
}
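The deposit_pheromones kernel above relies on atomicAdd_d, a double-precision atomic add whose definition is not part of this excerpt. On GPUs without a native double-precision atomicAdd (compute capability below 6.0) such a helper is commonly built from atomicCAS; the following is a sketch of what that helper likely looks like, not the project's actual definition:

__device__ double atomicAdd_d(double *address, double val)
{
    // Reinterpret the double as a 64-bit integer so atomicCAS can operate on it.
    unsigned long long int *address_as_ull = (unsigned long long int*) address;
    unsigned long long int old = *address_as_ull, assumed;
    do {
        assumed = old;
        // Retry until no other thread has modified the value between read and swap.
        old = atomicCAS(address_as_ull, assumed,
                        __double_as_longlong(val + __longlong_as_double(assumed)));
    } while (assumed != old);
    return __longlong_as_double(old);
}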
f320da142184c26b039826da31b7fc7a587f0075.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //CUDA. #include "device_launch_parameters.h" //CUDA. #include <iostream> //C++ standard I/O. #include <exception> //C++ exceptions. #include <Windows.h> //Windows header. #include "ArgumentParser.h" //Argument Parser. #include "Matrix.h" //Matrix Object. #define THREAD_BLOCK_SIZE 3 //Default block size for matrix under the GPU. /** * @brief Code Usage. */ void usage(void) { std::cout << "Usage: ./MatrixMultiplicationCuda [FILE] [FILE]" << std::endl; } /** * @brief Matrix Struct used under the CUDA environment. * * @param a First Matrix. * @param b Second Matrix. * @param c Resulting Matrix. */ typedef struct { int* elements; int width; int height; } MatrixStruct; /** * @brief The Multiplication that occurs under a CUDA thread. * * @param a First Matrix. * @param b Second Matrix. * @param c Resulting Matrix. */ __global__ void multiplyMatrixesGPU(MatrixStruct a, MatrixStruct b, MatrixStruct c) { int calc = 0; int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if ((row > a.height) || (col > b.width)) return; for (int i = 0; i < a.width; i++) { calc += (a.elements[row * a.width + i]) * (b.elements[i * b.width + col]); } c.elements[row * c.width + col] = calc; } /** * @brief Print 3x3 Matrixes. * @details Print the matrixes[formatted]. * * @param a First Matrix. * @param b Second Matrix. * @param c Resulting Matrix. */ void print3x3(const Matrix* a, const Matrix* b, const Matrix* c) { for (int i = 0; i < c->getWidth(); i++) { std::cout << "|"; for (int j = 0; j < c->getHeight(); j++) { std::cout << a->getElements()[i*c->getWidth() + j] << (j == c->getHeight() - 1 ? "" : " "); } std::cout << "|"; std::cout << (i == c->getWidth()/2 ? " * " : " "); std::cout << "|"; for (int j = 0; j < c->getHeight(); j++) { std::cout << b->getElements()[i*c->getWidth() + j] << (j == c->getHeight() - 1 ? "" : " "); } std::cout << "|"; std::cout << (i == c->getWidth() / 2 ? " = " : " "); std::cout << "|"; for (int j = 0; j < c->getHeight(); j++) { std::cout << c->getElements()[i*c->getWidth() + j] << (j == c->getHeight() - 1 ? "" : " "); } std::cout << "|"; std::cout << std::endl; } } /** * @brief Prepare the matrix to be used under CUDA and calls the cuda kernel. * * @param a First matrix. * @param b Second Matrix. * @param c Resulting Matrix. 
*/ void multiplyMatrixes(const Matrix* a, const Matrix* b, Matrix* c) { MatrixStruct gpu_a = { nullptr, a->getWidth(), a->getHeight() }; MatrixStruct gpu_b = { nullptr, b->getWidth(), b->getHeight() }; MatrixStruct gpu_c = { nullptr, c->getWidth(), c->getHeight() }; std::cout << "CUDA PREPARATION" << std::endl; std::cout << "-------------------------------------------------------------" << std::endl; hipError_t error = hipMalloc(&gpu_a.elements, gpu_a.height * gpu_a.width * sizeof(int)); std::cerr << "CUDA MALLOC A: " << hipGetErrorString(error) << std::endl; error = hipMemcpy(gpu_a.elements, a->getElements(), a->getWidth() * a->getHeight() * sizeof(int), hipMemcpyHostToDevice); std::cerr << "CUDA MEMCPY A: " << hipGetErrorString(error) << std::endl; error = hipMalloc(&gpu_b.elements, gpu_b.height * gpu_b.width * sizeof(int)); std::cerr << "CUDA MALLOC B: " << hipGetErrorString(error) << std::endl; error = hipMemcpy(gpu_b.elements, b->getElements(), b->getWidth() * b->getHeight() * sizeof(int), hipMemcpyHostToDevice); std::cerr << "CUDA MEMCPY B: " << hipGetErrorString(error) << std::endl; error = hipMalloc(&gpu_c.elements, gpu_c.height * gpu_c.width * sizeof(int)); std::cerr << "CUDA MALLOC C: " << hipGetErrorString(error) << std::endl; std::cout << "-------------------------------------------------------------\n" << std::endl; dim3 dimBlock(THREAD_BLOCK_SIZE, THREAD_BLOCK_SIZE); dim3 dimGrid((b->getWidth() + dimBlock.x - 1) / dimBlock.x, (a->getHeight() + dimBlock.y - 1) / dimBlock.y); hipLaunchKernelGGL(( multiplyMatrixesGPU) , dim3(dimGrid), dim3(dimBlock), 0, 0, gpu_a, gpu_b, gpu_c); std::cout << "KERNEL RUN" << std::endl; std::cout << "-------------------------------------------------------------" << std::endl; error = hipDeviceSynchronize(); std::cerr << "RUN KERNEL: " << hipGetErrorString(error) << std::endl; error = hipMemcpy(c->getElements(), gpu_c.elements, c->getWidth() * c->getHeight() * sizeof(int), hipMemcpyDeviceToHost); std::cerr << "FROM DEVICE TO HOST: " << hipGetErrorString(error) << std::endl; std::cout << "-------------------------------------------------------------\n" << std::endl; // Free device memory hipFree(gpu_a.elements); hipFree(gpu_b.elements); hipFree(gpu_c.elements); } /** * @brief The main function; * * @param argc Amount of arguments passed by the command line. * @param argv The arguments passed by the command line. * * @return 0 if no problem occured. */ int main(int argc, char** argv) { ArgumentParser* argParser = nullptr; Matrix* firstMatrix = nullptr; Matrix* secondMatrix = nullptr; Matrix* resultMatrix = nullptr; try { argParser = new ArgumentParser(argc, argv); } catch (std::exception& e) { std::cerr << "What(): " << e.what() << std::endl; } firstMatrix = argParser->prepareMatrix(MATRIX::FIRST); secondMatrix = argParser->prepareMatrix(MATRIX::SECOND); resultMatrix = new Matrix(firstMatrix->getHeight(), secondMatrix->getWidth()); multiplyMatrixes(firstMatrix, secondMatrix, resultMatrix); print3x3(firstMatrix, secondMatrix, resultMatrix); system("pause"); if (argParser != nullptr) { delete argParser; argParser = nullptr; } if (firstMatrix != nullptr) { delete firstMatrix; firstMatrix = nullptr; } if (secondMatrix != nullptr) { delete secondMatrix; secondMatrix = nullptr; } if (resultMatrix != nullptr) { delete resultMatrix; resultMatrix = nullptr; } return 0; }
f320da142184c26b039826da31b7fc7a587f0075.cu
#include "cuda_runtime.h" //CUDA. #include "device_launch_parameters.h" //CUDA. #include <iostream> //C++ standard I/O. #include <exception> //C++ exceptions. #include <Windows.h> //Windows header. #include "ArgumentParser.h" //Argument Parser. #include "Matrix.h" //Matrix Object. #define THREAD_BLOCK_SIZE 3 //Default block size for matrix under the GPU. /** * @brief Code Usage. */ void usage(void) { std::cout << "Usage: ./MatrixMultiplicationCuda [FILE] [FILE]" << std::endl; } /** * @brief Matrix Struct used under the CUDA environment. * * @param a First Matrix. * @param b Second Matrix. * @param c Resulting Matrix. */ typedef struct { int* elements; int width; int height; } MatrixStruct; /** * @brief The Multiplication that occurs under a CUDA thread. * * @param a First Matrix. * @param b Second Matrix. * @param c Resulting Matrix. */ __global__ void multiplyMatrixesGPU(MatrixStruct a, MatrixStruct b, MatrixStruct c) { int calc = 0; int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if ((row > a.height) || (col > b.width)) return; for (int i = 0; i < a.width; i++) { calc += (a.elements[row * a.width + i]) * (b.elements[i * b.width + col]); } c.elements[row * c.width + col] = calc; } /** * @brief Print 3x3 Matrixes. * @details Print the matrixes[formatted]. * * @param a First Matrix. * @param b Second Matrix. * @param c Resulting Matrix. */ void print3x3(const Matrix* a, const Matrix* b, const Matrix* c) { for (int i = 0; i < c->getWidth(); i++) { std::cout << "|"; for (int j = 0; j < c->getHeight(); j++) { std::cout << a->getElements()[i*c->getWidth() + j] << (j == c->getHeight() - 1 ? "" : " "); } std::cout << "|"; std::cout << (i == c->getWidth()/2 ? " * " : " "); std::cout << "|"; for (int j = 0; j < c->getHeight(); j++) { std::cout << b->getElements()[i*c->getWidth() + j] << (j == c->getHeight() - 1 ? "" : " "); } std::cout << "|"; std::cout << (i == c->getWidth() / 2 ? " = " : " "); std::cout << "|"; for (int j = 0; j < c->getHeight(); j++) { std::cout << c->getElements()[i*c->getWidth() + j] << (j == c->getHeight() - 1 ? "" : " "); } std::cout << "|"; std::cout << std::endl; } } /** * @brief Prepare the matrix to be used under CUDA and calls the cuda kernel. * * @param a First matrix. * @param b Second Matrix. * @param c Resulting Matrix. 
*/ void multiplyMatrixes(const Matrix* a, const Matrix* b, Matrix* c) { MatrixStruct gpu_a = { nullptr, a->getWidth(), a->getHeight() }; MatrixStruct gpu_b = { nullptr, b->getWidth(), b->getHeight() }; MatrixStruct gpu_c = { nullptr, c->getWidth(), c->getHeight() }; std::cout << "CUDA PREPARATION" << std::endl; std::cout << "-------------------------------------------------------------" << std::endl; cudaError error = cudaMalloc(&gpu_a.elements, gpu_a.height * gpu_a.width * sizeof(int)); std::cerr << "CUDA MALLOC A: " << cudaGetErrorString(error) << std::endl; error = cudaMemcpy(gpu_a.elements, a->getElements(), a->getWidth() * a->getHeight() * sizeof(int), cudaMemcpyHostToDevice); std::cerr << "CUDA MEMCPY A: " << cudaGetErrorString(error) << std::endl; error = cudaMalloc(&gpu_b.elements, gpu_b.height * gpu_b.width * sizeof(int)); std::cerr << "CUDA MALLOC B: " << cudaGetErrorString(error) << std::endl; error = cudaMemcpy(gpu_b.elements, b->getElements(), b->getWidth() * b->getHeight() * sizeof(int), cudaMemcpyHostToDevice); std::cerr << "CUDA MEMCPY B: " << cudaGetErrorString(error) << std::endl; error = cudaMalloc(&gpu_c.elements, gpu_c.height * gpu_c.width * sizeof(int)); std::cerr << "CUDA MALLOC C: " << cudaGetErrorString(error) << std::endl; std::cout << "-------------------------------------------------------------\n" << std::endl; dim3 dimBlock(THREAD_BLOCK_SIZE, THREAD_BLOCK_SIZE); dim3 dimGrid((b->getWidth() + dimBlock.x - 1) / dimBlock.x, (a->getHeight() + dimBlock.y - 1) / dimBlock.y); multiplyMatrixesGPU <<<dimGrid, dimBlock>>>(gpu_a, gpu_b, gpu_c); std::cout << "KERNEL RUN" << std::endl; std::cout << "-------------------------------------------------------------" << std::endl; error = cudaThreadSynchronize(); std::cerr << "RUN KERNEL: " << cudaGetErrorString(error) << std::endl; error = cudaMemcpy(c->getElements(), gpu_c.elements, c->getWidth() * c->getHeight() * sizeof(int), cudaMemcpyDeviceToHost); std::cerr << "FROM DEVICE TO HOST: " << cudaGetErrorString(error) << std::endl; std::cout << "-------------------------------------------------------------\n" << std::endl; // Free device memory cudaFree(gpu_a.elements); cudaFree(gpu_b.elements); cudaFree(gpu_c.elements); } /** * @brief The main function; * * @param argc Amount of arguments passed by the command line. * @param argv The arguments passed by the command line. * * @return 0 if no problem occured. */ int main(int argc, char** argv) { ArgumentParser* argParser = nullptr; Matrix* firstMatrix = nullptr; Matrix* secondMatrix = nullptr; Matrix* resultMatrix = nullptr; try { argParser = new ArgumentParser(argc, argv); } catch (std::exception& e) { std::cerr << "What(): " << e.what() << std::endl; } firstMatrix = argParser->prepareMatrix(MATRIX::FIRST); secondMatrix = argParser->prepareMatrix(MATRIX::SECOND); resultMatrix = new Matrix(firstMatrix->getHeight(), secondMatrix->getWidth()); multiplyMatrixes(firstMatrix, secondMatrix, resultMatrix); print3x3(firstMatrix, secondMatrix, resultMatrix); system("pause"); if (argParser != nullptr) { delete argParser; argParser = nullptr; } if (firstMatrix != nullptr) { delete firstMatrix; firstMatrix = nullptr; } if (secondMatrix != nullptr) { delete secondMatrix; secondMatrix = nullptr; } if (resultMatrix != nullptr) { delete resultMatrix; resultMatrix = nullptr; } return 0; }
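A note on the multiplyMatrixesGPU kernel in both versions above: its early-exit guard uses `>` even though valid indices only run up to height-1 / width-1, so the edge threads at row == a.height or col == b.width slip through whenever the matrix size is not a multiple of THREAD_BLOCK_SIZE. A tightened guard, together with a hypothetical host-side reference check (verifyResult is not part of the original program), might look like this sketch:

// Tightened bounds check for multiplyMatrixesGPU (replaces the '>' comparisons):
//   if ((row >= a.height) || (col >= b.width)) return;

// Hypothetical CPU reference check using the same Matrix accessors as above.
bool verifyResult(const Matrix* a, const Matrix* b, const Matrix* c) {
    for (int i = 0; i < a->getHeight(); i++) {
        for (int j = 0; j < b->getWidth(); j++) {
            int expected = 0;
            for (int k = 0; k < a->getWidth(); k++) {
                expected += a->getElements()[i * a->getWidth() + k] *
                            b->getElements()[k * b->getWidth() + j];
            }
            if (c->getElements()[i * c->getWidth() + j] != expected) {
                return false;  // mismatch between GPU and CPU result
            }
        }
    }
    return true;
}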
2847a1775745ba4a664fd78271a1a0e13fb6616e.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <iostream> #include <hip/hip_runtime_api.h> //#include <cutil.h> #include <hip/hip_runtime.h> float* h_A; float* h_B; float* h_C; float* h_res; float* d_A; float* d_B; float* d_C; float* d_res; __global__ //void compute(const float* A, const float* B, const float* C, float* D, int n) { void compute(float* D, int n, int div) { int tid = blockDim.x * blockIdx.x + threadIdx.x; float I1 = tid * 2.0; int thread_id = threadIdx.x % 32; if (thread_id < div) { __asm volatile ( " .reg .s32 %r29;\n\t" " .reg .s32 %r30;\n\t" " .reg .s32 %r31;\n\t" " .reg .s32 %r32;\n\t" " .reg .s32 %r33;\n\t" " .reg .s32 %r34;\n\t" " .reg .s32 %r35;\n\t" " .reg .s32 %r36;\n\t" " .reg .s32 %r37;\n\t" " .reg .s32 %r38;\n\t" " .reg .s32 %r39;\n\t" " .reg .s32 %r40;\n\t" " .reg .s32 %r41;\n\t" " .reg .s32 %r42;\n\t" " .reg .s32 %r43;\n\t" " .reg .s32 %r44;\n\t" " .reg .s32 %r45;\n\t" " .reg .f64 %r12;\n\t" " .reg .f64 %r13;\n\t" " .reg .f64 %r14;\n\t" " .reg .f64 %r15;\n\t" " .reg .f64 %r16;\n\t" " .reg .f64 %r17;\n\t" " .reg .f64 %r18;\n\t" " .reg .f64 %r19;\n\t" " .reg .f64 %r20;\n\t" " .reg .f64 %r21;\n\t" " .reg .f64 %r22;\n\t" " .reg .f64 %r23;\n\t" " .reg .f64 %r24;\n\t" " .reg .f64 %r25;\n\t" " .reg .f64 %r26;\n\t" " .reg .f64 %r27;\n\t" " .reg .f64 %r28;\n\t" "mov.f64 %r12, 4.4;\n\t" "mov.f64 %r13, %r12;\n\t" "mov.f64 %r14, 2.2;\n\t" "mov.f64 %r15, 3.3;\n\t" "mov.f64 %r16, 1.23;\n\t" "mov.f64 %r17, 2.42;\n\t" "mov.f64 %r18, 3.34;\n\t" "mov.f64 %r19, 5.62;\n\t" "mov.f64 %r20, 2.56;\n\t" "mov.f64 %r21, 1.56;\n\t" "mov.f64 %r22, 2.56;\n\t" "mov.f64 %r23, 5.56;\n\t" "mov.f64 %r24, 8.56;\n\t" "mov.f64 %r25, 3.56;\n\t" "mov.f64 %r26, 5.56;\n\t" "mov.f64 %r27, 6.56;\n\t" "mov.f64 %r28, 5.6;\n\t" ); for (int k = 0; k < n; k++) { __asm volatile ( "cvt.rni.s32.f64 %r30, %r13;\n\t" "cvt.rm.f64.s32 %r13, %r30;\n\t" "cvt.rni.s32.f64 %r31, %r14;\n\t" "cvt.rm.f64.s32 %r14, %r31;\n\t" "cvt.rni.s32.f64 %r32, %r15;\n\t" "cvt.rm.f64.s32 %r15, %r32;\n\t" "cvt.rni.s32.f64 %r33, %r16;\n\t" "cvt.rm.f64.s32 %r16, %r33;\n\t" "cvt.rni.s32.f64 %r34, %r17;\n\t" "cvt.rm.f64.s32 %r17, %r34;\n\t" "cvt.rni.s32.f64 %r35, %r18;\n\t" "cvt.rm.f64.s32 %r18, %r35;\n\t" "cvt.rni.s32.f64 %r36, %r19;\n\t" "cvt.rm.f64.s32 %r19, %r36;\n\t" "cvt.rni.s32.f64 %r37, %r20;\n\t" "cvt.rm.f64.s32 %r20, %r37;\n\t" "cvt.rni.s32.f64 %r38, %r21;\n\t" "cvt.rm.f64.s32 %r21, %r38;\n\t" "cvt.rni.s32.f64 %r39, %r22;\n\t" "cvt.rm.f64.s32 %r22, %r39;\n\t" "cvt.rni.s32.f64 %r40, %r23;\n\t" "cvt.rm.f64.s32 %r23, %r40;\n\t" "cvt.rni.s32.f64 %r41, %r24;\n\t" "cvt.rm.f64.s32 %r24, %r41;\n\t" "cvt.rni.s32.f64 %r42, %r25;\n\t" "cvt.rm.f64.s32 %r25, %r42;\n\t" "cvt.rni.s32.f64 %r43, %r26;\n\t" "cvt.rm.f64.s32 %r26, %r43;\n\t" "cvt.rni.s32.f64 %r44, %r27;\n\t" "cvt.rm.f64.s32 %r27, %r44;\n\t" "cvt.rni.s32.f64 %r45, %r28;\n\t" "cvt.rm.f64.s32 %r28, %r45;\n\t" "cvt.rni.s32.f64 %r30, %r13;\n\t" "cvt.rm.f64.s32 %r13, %r30;\n\t" "cvt.rni.s32.f64 %r31, %r14;\n\t" "cvt.rm.f64.s32 %r14, %r31;\n\t" "cvt.rni.s32.f64 %r32, %r15;\n\t" "cvt.rm.f64.s32 %r15, %r32;\n\t" "cvt.rni.s32.f64 %r33, %r16;\n\t" "cvt.rm.f64.s32 %r16, %r33;\n\t" "cvt.rni.s32.f64 %r34, %r17;\n\t" "cvt.rm.f64.s32 %r17, %r34;\n\t" "cvt.rni.s32.f64 %r35, %r18;\n\t" "cvt.rm.f64.s32 %r18, %r35;\n\t" "cvt.rni.s32.f64 %r36, %r19;\n\t" "cvt.rm.f64.s32 %r19, %r36;\n\t" "cvt.rni.s32.f64 %r37, %r20;\n\t" "cvt.rm.f64.s32 %r20, %r37;\n\t" "cvt.rni.s32.f64 %r38, %r21;\n\t" "cvt.rm.f64.s32 %r21, %r38;\n\t" "cvt.rni.s32.f64 %r39, %r22;\n\t" "cvt.rm.f64.s32 
%r22, %r39;\n\t" "cvt.rni.s32.f64 %r40, %r23;\n\t" "cvt.rm.f64.s32 %r23, %r40;\n\t" "cvt.rni.s32.f64 %r41, %r24;\n\t" "cvt.rm.f64.s32 %r24, %r41;\n\t" "cvt.rni.s32.f64 %r42, %r25;\n\t" "cvt.rm.f64.s32 %r25, %r42;\n\t" "cvt.rni.s32.f64 %r43, %r26;\n\t" "cvt.rm.f64.s32 %r26, %r43;\n\t" "cvt.rni.s32.f64 %r44, %r27;\n\t" "cvt.rm.f64.s32 %r27, %r44;\n\t" "cvt.rni.s32.f64 %r45, %r28;\n\t" "cvt.rm.f64.s32 %r28, %r45;\n\t" "cvt.rni.s32.f64 %r30, %r13;\n\t" "cvt.rm.f64.s32 %r13, %r30;\n\t" "cvt.rni.s32.f64 %r31, %r14;\n\t" "cvt.rm.f64.s32 %r14, %r31;\n\t" "cvt.rni.s32.f64 %r32, %r15;\n\t" "cvt.rm.f64.s32 %r15, %r32;\n\t" "cvt.rni.s32.f64 %r33, %r16;\n\t" "cvt.rm.f64.s32 %r16, %r33;\n\t" "cvt.rni.s32.f64 %r34, %r17;\n\t" "cvt.rm.f64.s32 %r17, %r34;\n\t" "cvt.rni.s32.f64 %r35, %r18;\n\t" "cvt.rm.f64.s32 %r18, %r35;\n\t" "cvt.rni.s32.f64 %r36, %r19;\n\t" "cvt.rm.f64.s32 %r19, %r36;\n\t" "cvt.rni.s32.f64 %r37, %r20;\n\t" "cvt.rm.f64.s32 %r20, %r37;\n\t" "cvt.rni.s32.f64 %r38, %r21;\n\t" "cvt.rm.f64.s32 %r21, %r38;\n\t" "cvt.rni.s32.f64 %r39, %r22;\n\t" "cvt.rm.f64.s32 %r22, %r39;\n\t" "cvt.rni.s32.f64 %r40, %r23;\n\t" "cvt.rm.f64.s32 %r23, %r40;\n\t" "cvt.rni.s32.f64 %r41, %r24;\n\t" "cvt.rm.f64.s32 %r24, %r41;\n\t" "cvt.rni.s32.f64 %r42, %r25;\n\t" "cvt.rm.f64.s32 %r25, %r42;\n\t" "cvt.rni.s32.f64 %r43, %r26;\n\t" "cvt.rm.f64.s32 %r26, %r43;\n\t" "cvt.rni.s32.f64 %r44, %r27;\n\t" "cvt.rm.f64.s32 %r27, %r44;\n\t" "cvt.rni.s32.f64 %r45, %r28;\n\t" "cvt.rm.f64.s32 %r28, %r45;\n\t" "cvt.rni.s32.f64 %r30, %r13;\n\t" "cvt.rm.f64.s32 %r13, %r30;\n\t" "cvt.rni.s32.f64 %r31, %r14;\n\t" "cvt.rm.f64.s32 %r14, %r31;\n\t" "cvt.rni.s32.f64 %r32, %r15;\n\t" "cvt.rm.f64.s32 %r15, %r32;\n\t" "cvt.rni.s32.f64 %r33, %r16;\n\t" "cvt.rm.f64.s32 %r16, %r33;\n\t" "cvt.rni.s32.f64 %r34, %r17;\n\t" "cvt.rm.f64.s32 %r17, %r34;\n\t" "cvt.rni.s32.f64 %r35, %r18;\n\t" "cvt.rm.f64.s32 %r18, %r35;\n\t" "cvt.rni.s32.f64 %r36, %r19;\n\t" "cvt.rm.f64.s32 %r19, %r36;\n\t" "cvt.rni.s32.f64 %r37, %r20;\n\t" "cvt.rm.f64.s32 %r20, %r37;\n\t" "cvt.rni.s32.f64 %r38, %r21;\n\t" "cvt.rm.f64.s32 %r21, %r38;\n\t" "cvt.rni.s32.f64 %r39, %r22;\n\t" "cvt.rm.f64.s32 %r22, %r39;\n\t" "cvt.rni.s32.f64 %r40, %r23;\n\t" "cvt.rm.f64.s32 %r23, %r40;\n\t" "cvt.rni.s32.f64 %r41, %r24;\n\t" "cvt.rm.f64.s32 %r24, %r41;\n\t" "cvt.rni.s32.f64 %r42, %r25;\n\t" "cvt.rm.f64.s32 %r25, %r42;\n\t" "cvt.rni.s32.f64 %r43, %r26;\n\t" "cvt.rm.f64.s32 %r26, %r43;\n\t" "cvt.rni.s32.f64 %r44, %r27;\n\t" "cvt.rm.f64.s32 %r27, %r44;\n\t" "cvt.rni.s32.f64 %r45, %r28;\n\t" "cvt.rm.f64.s32 %r28, %r45;\n\t" "cvt.rni.s32.f64 %r30, %r13;\n\t" "cvt.rm.f64.s32 %r13, %r30;\n\t" "cvt.rni.s32.f64 %r31, %r14;\n\t" "cvt.rm.f64.s32 %r14, %r31;\n\t" "cvt.rni.s32.f64 %r32, %r15;\n\t" "cvt.rm.f64.s32 %r15, %r32;\n\t" "cvt.rni.s32.f64 %r33, %r16;\n\t" "cvt.rm.f64.s32 %r16, %r33;\n\t" "cvt.rni.s32.f64 %r34, %r17;\n\t" "cvt.rm.f64.s32 %r17, %r34;\n\t" "cvt.rni.s32.f64 %r35, %r18;\n\t" "cvt.rm.f64.s32 %r18, %r35;\n\t" "cvt.rni.s32.f64 %r36, %r19;\n\t" "cvt.rm.f64.s32 %r19, %r36;\n\t" "cvt.rni.s32.f64 %r37, %r20;\n\t" "cvt.rm.f64.s32 %r20, %r37;\n\t" "cvt.rni.s32.f64 %r38, %r21;\n\t" "cvt.rm.f64.s32 %r21, %r38;\n\t" "cvt.rni.s32.f64 %r39, %r22;\n\t" "cvt.rm.f64.s32 %r22, %r39;\n\t" "cvt.rni.s32.f64 %r40, %r23;\n\t" "cvt.rm.f64.s32 %r23, %r40;\n\t" "cvt.rni.s32.f64 %r41, %r24;\n\t" "cvt.rm.f64.s32 %r24, %r41;\n\t" "cvt.rni.s32.f64 %r42, %r25;\n\t" "cvt.rm.f64.s32 %r25, %r42;\n\t" "cvt.rni.s32.f64 %r43, %r26;\n\t" "cvt.rm.f64.s32 %r26, %r43;\n\t" "cvt.rni.s32.f64 %r44, %r27;\n\t" "cvt.rm.f64.s32 
%r27, %r44;\n\t" "cvt.rni.s32.f64 %r45, %r28;\n\t" "cvt.rm.f64.s32 %r28, %r45;\n\t" "cvt.rni.s32.f64 %r30, %r13;\n\t" "cvt.rm.f64.s32 %r13, %r30;\n\t" "cvt.rni.s32.f64 %r31, %r14;\n\t" "cvt.rm.f64.s32 %r14, %r31;\n\t" "cvt.rni.s32.f64 %r32, %r15;\n\t" "cvt.rm.f64.s32 %r15, %r32;\n\t" "cvt.rni.s32.f64 %r33, %r16;\n\t" "cvt.rm.f64.s32 %r16, %r33;\n\t" "cvt.rni.s32.f64 %r34, %r17;\n\t" "cvt.rm.f64.s32 %r17, %r34;\n\t" "cvt.rni.s32.f64 %r35, %r18;\n\t" "cvt.rm.f64.s32 %r18, %r35;\n\t" "cvt.rni.s32.f64 %r36, %r19;\n\t" "cvt.rm.f64.s32 %r19, %r36;\n\t" "cvt.rni.s32.f64 %r37, %r20;\n\t" "cvt.rm.f64.s32 %r20, %r37;\n\t" "cvt.rni.s32.f64 %r38, %r21;\n\t" "cvt.rm.f64.s32 %r21, %r38;\n\t" "cvt.rni.s32.f64 %r39, %r22;\n\t" "cvt.rm.f64.s32 %r22, %r39;\n\t" "cvt.rni.s32.f64 %r40, %r23;\n\t" "cvt.rm.f64.s32 %r23, %r40;\n\t" "cvt.rni.s32.f64 %r41, %r24;\n\t" "cvt.rm.f64.s32 %r24, %r41;\n\t" "cvt.rni.s32.f64 %r42, %r25;\n\t" "cvt.rm.f64.s32 %r25, %r42;\n\t" "cvt.rni.s32.f64 %r43, %r26;\n\t" "cvt.rm.f64.s32 %r26, %r43;\n\t" "cvt.rni.s32.f64 %r44, %r27;\n\t" "cvt.rm.f64.s32 %r27, %r44;\n\t" "cvt.rni.s32.f64 %r45, %r28;\n\t" "cvt.rm.f64.s32 %r28, %r45;\n\t" "cvt.rni.s32.f64 %r30, %r13;\n\t" "cvt.rm.f64.s32 %r13, %r30;\n\t" "cvt.rni.s32.f64 %r31, %r14;\n\t" "cvt.rm.f64.s32 %r14, %r31;\n\t" "cvt.rni.s32.f64 %r32, %r15;\n\t" "cvt.rm.f64.s32 %r15, %r32;\n\t" "cvt.rni.s32.f64 %r33, %r16;\n\t" "cvt.rm.f64.s32 %r16, %r33;\n\t" "cvt.rni.s32.f64 %r34, %r17;\n\t" "cvt.rm.f64.s32 %r17, %r34;\n\t" "cvt.rni.s32.f64 %r35, %r18;\n\t" "cvt.rm.f64.s32 %r18, %r35;\n\t" "cvt.rni.s32.f64 %r36, %r19;\n\t" "cvt.rm.f64.s32 %r19, %r36;\n\t" "cvt.rni.s32.f64 %r37, %r20;\n\t" "cvt.rm.f64.s32 %r20, %r37;\n\t" "cvt.rni.s32.f64 %r38, %r21;\n\t" "cvt.rm.f64.s32 %r21, %r38;\n\t" "cvt.rni.s32.f64 %r39, %r22;\n\t" "cvt.rm.f64.s32 %r22, %r39;\n\t" "cvt.rni.s32.f64 %r40, %r23;\n\t" "cvt.rm.f64.s32 %r23, %r40;\n\t" "cvt.rni.s32.f64 %r41, %r24;\n\t" "cvt.rm.f64.s32 %r24, %r41;\n\t" "cvt.rni.s32.f64 %r42, %r25;\n\t" "cvt.rm.f64.s32 %r25, %r42;\n\t" "cvt.rni.s32.f64 %r43, %r26;\n\t" "cvt.rm.f64.s32 %r26, %r43;\n\t" "cvt.rni.s32.f64 %r44, %r27;\n\t" "cvt.rm.f64.s32 %r27, %r44;\n\t" "cvt.rni.s32.f64 %r45, %r28;\n\t" "cvt.rm.f64.s32 %r28, %r45;\n\t" "cvt.rni.s32.f64 %r30, %r13;\n\t" "cvt.rm.f64.s32 %r13, %r30;\n\t" "cvt.rni.s32.f64 %r31, %r14;\n\t" "cvt.rm.f64.s32 %r14, %r31;\n\t" "cvt.rni.s32.f64 %r32, %r15;\n\t" "cvt.rm.f64.s32 %r15, %r32;\n\t" "cvt.rni.s32.f64 %r33, %r16;\n\t" "cvt.rm.f64.s32 %r16, %r33;\n\t" "cvt.rni.s32.f64 %r34, %r17;\n\t" "cvt.rm.f64.s32 %r17, %r34;\n\t" "cvt.rni.s32.f64 %r35, %r18;\n\t" "cvt.rm.f64.s32 %r18, %r35;\n\t" "cvt.rni.s32.f64 %r36, %r19;\n\t" "cvt.rm.f64.s32 %r19, %r36;\n\t" "cvt.rni.s32.f64 %r37, %r20;\n\t" "cvt.rm.f64.s32 %r20, %r37;\n\t" "cvt.rni.s32.f64 %r38, %r21;\n\t" "cvt.rm.f64.s32 %r21, %r38;\n\t" "cvt.rni.s32.f64 %r39, %r22;\n\t" "cvt.rm.f64.s32 %r22, %r39;\n\t" "cvt.rni.s32.f64 %r40, %r23;\n\t" "cvt.rm.f64.s32 %r23, %r40;\n\t" "cvt.rni.s32.f64 %r41, %r24;\n\t" "cvt.rm.f64.s32 %r24, %r41;\n\t" "cvt.rni.s32.f64 %r42, %r25;\n\t" "cvt.rm.f64.s32 %r25, %r42;\n\t" "cvt.rni.s32.f64 %r43, %r26;\n\t" "cvt.rm.f64.s32 %r26, %r43;\n\t" "cvt.rni.s32.f64 %r44, %r27;\n\t" "cvt.rm.f64.s32 %r27, %r44;\n\t" "cvt.rni.s32.f64 %r45, %r28;\n\t" "cvt.rm.f64.s32 %r28, %r45;\n\t" ); } } // __syncthreads(); // if ((blockDim.x * blockIdx.x + threadIdx.x) == 0) *D = I1; // __syncthreads(); } void usage() { std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl; } int 
main(int argc, char **argv)
{
    if (argc != 5) {
        usage();
        exit(1);
    }

    int num_blocks = atoi(argv[1]);
    int num_threads_per_block = atoi(argv[2]);
    int iterations = atoi(argv[3]);
    int divergence = atoi(argv[4]);

    // h_A = new float(2.0);
    // h_B = new float(3.0);
    // h_C = new float(4.0);

    // hipMalloc((void**)&d_A, sizeof(float));
    // hipMalloc((void**)&d_B, sizeof(float));
    // hipMalloc((void**)&d_C, sizeof(float));
    hipMalloc((void**)&d_res, sizeof(float));

    // h_res was never given backing storage before the final hipMemcpy; allocate it.
    h_res = new float(0.0f);

    // hipMemcpy(d_A, h_A, sizeof(float), hipMemcpyHostToDevice);
    // hipMemcpy(d_B, h_B, sizeof(float), hipMemcpyHostToDevice);
    // hipMemcpy(d_C, h_C, sizeof(float), hipMemcpyHostToDevice);

    hipEvent_t start, stop;
    float time;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);
    hipProfilerStart();

    // hipLaunchKernelGGL(( compute), dim3(num_blocks), dim3(num_threads_per_block), 0, 0, d_A, d_B, d_C, d_res, iterations);
    hipLaunchKernelGGL(( compute), dim3(num_blocks), dim3(num_threads_per_block), 0, 0, d_res, iterations, divergence);
    hipDeviceSynchronize();
    hipProfilerStop();

    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&time, start, stop);
    std::cout << "GPU Elapsed Time = " << time << std::endl;
    hipEventDestroy(start);
    hipEventDestroy(stop);

    hipMemcpy(h_res, d_res, sizeof(float), hipMemcpyDeviceToHost);

    delete h_res;
    hipFree(d_res);
    return 0;
}
2847a1775745ba4a664fd78271a1a0e13fb6616e.cu
#include <stdio.h> #include <iostream> #include <cuda_profiler_api.h> //#include <cutil.h> #include <cuda_runtime.h> float* h_A; float* h_B; float* h_C; float* h_res; float* d_A; float* d_B; float* d_C; float* d_res; __global__ //void compute(const float* A, const float* B, const float* C, float* D, int n) { void compute(float* D, int n, int div) { int tid = blockDim.x * blockIdx.x + threadIdx.x; float I1 = tid * 2.0; int thread_id = threadIdx.x % 32; if (thread_id < div) { __asm volatile ( " .reg .s32 %r29;\n\t" " .reg .s32 %r30;\n\t" " .reg .s32 %r31;\n\t" " .reg .s32 %r32;\n\t" " .reg .s32 %r33;\n\t" " .reg .s32 %r34;\n\t" " .reg .s32 %r35;\n\t" " .reg .s32 %r36;\n\t" " .reg .s32 %r37;\n\t" " .reg .s32 %r38;\n\t" " .reg .s32 %r39;\n\t" " .reg .s32 %r40;\n\t" " .reg .s32 %r41;\n\t" " .reg .s32 %r42;\n\t" " .reg .s32 %r43;\n\t" " .reg .s32 %r44;\n\t" " .reg .s32 %r45;\n\t" " .reg .f64 %r12;\n\t" " .reg .f64 %r13;\n\t" " .reg .f64 %r14;\n\t" " .reg .f64 %r15;\n\t" " .reg .f64 %r16;\n\t" " .reg .f64 %r17;\n\t" " .reg .f64 %r18;\n\t" " .reg .f64 %r19;\n\t" " .reg .f64 %r20;\n\t" " .reg .f64 %r21;\n\t" " .reg .f64 %r22;\n\t" " .reg .f64 %r23;\n\t" " .reg .f64 %r24;\n\t" " .reg .f64 %r25;\n\t" " .reg .f64 %r26;\n\t" " .reg .f64 %r27;\n\t" " .reg .f64 %r28;\n\t" "mov.f64 %r12, 4.4;\n\t" "mov.f64 %r13, %r12;\n\t" "mov.f64 %r14, 2.2;\n\t" "mov.f64 %r15, 3.3;\n\t" "mov.f64 %r16, 1.23;\n\t" "mov.f64 %r17, 2.42;\n\t" "mov.f64 %r18, 3.34;\n\t" "mov.f64 %r19, 5.62;\n\t" "mov.f64 %r20, 2.56;\n\t" "mov.f64 %r21, 1.56;\n\t" "mov.f64 %r22, 2.56;\n\t" "mov.f64 %r23, 5.56;\n\t" "mov.f64 %r24, 8.56;\n\t" "mov.f64 %r25, 3.56;\n\t" "mov.f64 %r26, 5.56;\n\t" "mov.f64 %r27, 6.56;\n\t" "mov.f64 %r28, 5.6;\n\t" ); for (int k = 0; k < n; k++) { __asm volatile ( "cvt.rni.s32.f64 %r30, %r13;\n\t" "cvt.rm.f64.s32 %r13, %r30;\n\t" "cvt.rni.s32.f64 %r31, %r14;\n\t" "cvt.rm.f64.s32 %r14, %r31;\n\t" "cvt.rni.s32.f64 %r32, %r15;\n\t" "cvt.rm.f64.s32 %r15, %r32;\n\t" "cvt.rni.s32.f64 %r33, %r16;\n\t" "cvt.rm.f64.s32 %r16, %r33;\n\t" "cvt.rni.s32.f64 %r34, %r17;\n\t" "cvt.rm.f64.s32 %r17, %r34;\n\t" "cvt.rni.s32.f64 %r35, %r18;\n\t" "cvt.rm.f64.s32 %r18, %r35;\n\t" "cvt.rni.s32.f64 %r36, %r19;\n\t" "cvt.rm.f64.s32 %r19, %r36;\n\t" "cvt.rni.s32.f64 %r37, %r20;\n\t" "cvt.rm.f64.s32 %r20, %r37;\n\t" "cvt.rni.s32.f64 %r38, %r21;\n\t" "cvt.rm.f64.s32 %r21, %r38;\n\t" "cvt.rni.s32.f64 %r39, %r22;\n\t" "cvt.rm.f64.s32 %r22, %r39;\n\t" "cvt.rni.s32.f64 %r40, %r23;\n\t" "cvt.rm.f64.s32 %r23, %r40;\n\t" "cvt.rni.s32.f64 %r41, %r24;\n\t" "cvt.rm.f64.s32 %r24, %r41;\n\t" "cvt.rni.s32.f64 %r42, %r25;\n\t" "cvt.rm.f64.s32 %r25, %r42;\n\t" "cvt.rni.s32.f64 %r43, %r26;\n\t" "cvt.rm.f64.s32 %r26, %r43;\n\t" "cvt.rni.s32.f64 %r44, %r27;\n\t" "cvt.rm.f64.s32 %r27, %r44;\n\t" "cvt.rni.s32.f64 %r45, %r28;\n\t" "cvt.rm.f64.s32 %r28, %r45;\n\t" "cvt.rni.s32.f64 %r30, %r13;\n\t" "cvt.rm.f64.s32 %r13, %r30;\n\t" "cvt.rni.s32.f64 %r31, %r14;\n\t" "cvt.rm.f64.s32 %r14, %r31;\n\t" "cvt.rni.s32.f64 %r32, %r15;\n\t" "cvt.rm.f64.s32 %r15, %r32;\n\t" "cvt.rni.s32.f64 %r33, %r16;\n\t" "cvt.rm.f64.s32 %r16, %r33;\n\t" "cvt.rni.s32.f64 %r34, %r17;\n\t" "cvt.rm.f64.s32 %r17, %r34;\n\t" "cvt.rni.s32.f64 %r35, %r18;\n\t" "cvt.rm.f64.s32 %r18, %r35;\n\t" "cvt.rni.s32.f64 %r36, %r19;\n\t" "cvt.rm.f64.s32 %r19, %r36;\n\t" "cvt.rni.s32.f64 %r37, %r20;\n\t" "cvt.rm.f64.s32 %r20, %r37;\n\t" "cvt.rni.s32.f64 %r38, %r21;\n\t" "cvt.rm.f64.s32 %r21, %r38;\n\t" "cvt.rni.s32.f64 %r39, %r22;\n\t" "cvt.rm.f64.s32 %r22, %r39;\n\t" "cvt.rni.s32.f64 %r40, %r23;\n\t" 
"cvt.rm.f64.s32 %r23, %r40;\n\t" "cvt.rni.s32.f64 %r41, %r24;\n\t" "cvt.rm.f64.s32 %r24, %r41;\n\t" "cvt.rni.s32.f64 %r42, %r25;\n\t" "cvt.rm.f64.s32 %r25, %r42;\n\t" "cvt.rni.s32.f64 %r43, %r26;\n\t" "cvt.rm.f64.s32 %r26, %r43;\n\t" "cvt.rni.s32.f64 %r44, %r27;\n\t" "cvt.rm.f64.s32 %r27, %r44;\n\t" "cvt.rni.s32.f64 %r45, %r28;\n\t" "cvt.rm.f64.s32 %r28, %r45;\n\t" "cvt.rni.s32.f64 %r30, %r13;\n\t" "cvt.rm.f64.s32 %r13, %r30;\n\t" "cvt.rni.s32.f64 %r31, %r14;\n\t" "cvt.rm.f64.s32 %r14, %r31;\n\t" "cvt.rni.s32.f64 %r32, %r15;\n\t" "cvt.rm.f64.s32 %r15, %r32;\n\t" "cvt.rni.s32.f64 %r33, %r16;\n\t" "cvt.rm.f64.s32 %r16, %r33;\n\t" "cvt.rni.s32.f64 %r34, %r17;\n\t" "cvt.rm.f64.s32 %r17, %r34;\n\t" "cvt.rni.s32.f64 %r35, %r18;\n\t" "cvt.rm.f64.s32 %r18, %r35;\n\t" "cvt.rni.s32.f64 %r36, %r19;\n\t" "cvt.rm.f64.s32 %r19, %r36;\n\t" "cvt.rni.s32.f64 %r37, %r20;\n\t" "cvt.rm.f64.s32 %r20, %r37;\n\t" "cvt.rni.s32.f64 %r38, %r21;\n\t" "cvt.rm.f64.s32 %r21, %r38;\n\t" "cvt.rni.s32.f64 %r39, %r22;\n\t" "cvt.rm.f64.s32 %r22, %r39;\n\t" "cvt.rni.s32.f64 %r40, %r23;\n\t" "cvt.rm.f64.s32 %r23, %r40;\n\t" "cvt.rni.s32.f64 %r41, %r24;\n\t" "cvt.rm.f64.s32 %r24, %r41;\n\t" "cvt.rni.s32.f64 %r42, %r25;\n\t" "cvt.rm.f64.s32 %r25, %r42;\n\t" "cvt.rni.s32.f64 %r43, %r26;\n\t" "cvt.rm.f64.s32 %r26, %r43;\n\t" "cvt.rni.s32.f64 %r44, %r27;\n\t" "cvt.rm.f64.s32 %r27, %r44;\n\t" "cvt.rni.s32.f64 %r45, %r28;\n\t" "cvt.rm.f64.s32 %r28, %r45;\n\t" "cvt.rni.s32.f64 %r30, %r13;\n\t" "cvt.rm.f64.s32 %r13, %r30;\n\t" "cvt.rni.s32.f64 %r31, %r14;\n\t" "cvt.rm.f64.s32 %r14, %r31;\n\t" "cvt.rni.s32.f64 %r32, %r15;\n\t" "cvt.rm.f64.s32 %r15, %r32;\n\t" "cvt.rni.s32.f64 %r33, %r16;\n\t" "cvt.rm.f64.s32 %r16, %r33;\n\t" "cvt.rni.s32.f64 %r34, %r17;\n\t" "cvt.rm.f64.s32 %r17, %r34;\n\t" "cvt.rni.s32.f64 %r35, %r18;\n\t" "cvt.rm.f64.s32 %r18, %r35;\n\t" "cvt.rni.s32.f64 %r36, %r19;\n\t" "cvt.rm.f64.s32 %r19, %r36;\n\t" "cvt.rni.s32.f64 %r37, %r20;\n\t" "cvt.rm.f64.s32 %r20, %r37;\n\t" "cvt.rni.s32.f64 %r38, %r21;\n\t" "cvt.rm.f64.s32 %r21, %r38;\n\t" "cvt.rni.s32.f64 %r39, %r22;\n\t" "cvt.rm.f64.s32 %r22, %r39;\n\t" "cvt.rni.s32.f64 %r40, %r23;\n\t" "cvt.rm.f64.s32 %r23, %r40;\n\t" "cvt.rni.s32.f64 %r41, %r24;\n\t" "cvt.rm.f64.s32 %r24, %r41;\n\t" "cvt.rni.s32.f64 %r42, %r25;\n\t" "cvt.rm.f64.s32 %r25, %r42;\n\t" "cvt.rni.s32.f64 %r43, %r26;\n\t" "cvt.rm.f64.s32 %r26, %r43;\n\t" "cvt.rni.s32.f64 %r44, %r27;\n\t" "cvt.rm.f64.s32 %r27, %r44;\n\t" "cvt.rni.s32.f64 %r45, %r28;\n\t" "cvt.rm.f64.s32 %r28, %r45;\n\t" "cvt.rni.s32.f64 %r30, %r13;\n\t" "cvt.rm.f64.s32 %r13, %r30;\n\t" "cvt.rni.s32.f64 %r31, %r14;\n\t" "cvt.rm.f64.s32 %r14, %r31;\n\t" "cvt.rni.s32.f64 %r32, %r15;\n\t" "cvt.rm.f64.s32 %r15, %r32;\n\t" "cvt.rni.s32.f64 %r33, %r16;\n\t" "cvt.rm.f64.s32 %r16, %r33;\n\t" "cvt.rni.s32.f64 %r34, %r17;\n\t" "cvt.rm.f64.s32 %r17, %r34;\n\t" "cvt.rni.s32.f64 %r35, %r18;\n\t" "cvt.rm.f64.s32 %r18, %r35;\n\t" "cvt.rni.s32.f64 %r36, %r19;\n\t" "cvt.rm.f64.s32 %r19, %r36;\n\t" "cvt.rni.s32.f64 %r37, %r20;\n\t" "cvt.rm.f64.s32 %r20, %r37;\n\t" "cvt.rni.s32.f64 %r38, %r21;\n\t" "cvt.rm.f64.s32 %r21, %r38;\n\t" "cvt.rni.s32.f64 %r39, %r22;\n\t" "cvt.rm.f64.s32 %r22, %r39;\n\t" "cvt.rni.s32.f64 %r40, %r23;\n\t" "cvt.rm.f64.s32 %r23, %r40;\n\t" "cvt.rni.s32.f64 %r41, %r24;\n\t" "cvt.rm.f64.s32 %r24, %r41;\n\t" "cvt.rni.s32.f64 %r42, %r25;\n\t" "cvt.rm.f64.s32 %r25, %r42;\n\t" "cvt.rni.s32.f64 %r43, %r26;\n\t" "cvt.rm.f64.s32 %r26, %r43;\n\t" "cvt.rni.s32.f64 %r44, %r27;\n\t" "cvt.rm.f64.s32 %r27, %r44;\n\t" "cvt.rni.s32.f64 %r45, %r28;\n\t" 
"cvt.rm.f64.s32 %r28, %r45;\n\t" "cvt.rni.s32.f64 %r30, %r13;\n\t" "cvt.rm.f64.s32 %r13, %r30;\n\t" "cvt.rni.s32.f64 %r31, %r14;\n\t" "cvt.rm.f64.s32 %r14, %r31;\n\t" "cvt.rni.s32.f64 %r32, %r15;\n\t" "cvt.rm.f64.s32 %r15, %r32;\n\t" "cvt.rni.s32.f64 %r33, %r16;\n\t" "cvt.rm.f64.s32 %r16, %r33;\n\t" "cvt.rni.s32.f64 %r34, %r17;\n\t" "cvt.rm.f64.s32 %r17, %r34;\n\t" "cvt.rni.s32.f64 %r35, %r18;\n\t" "cvt.rm.f64.s32 %r18, %r35;\n\t" "cvt.rni.s32.f64 %r36, %r19;\n\t" "cvt.rm.f64.s32 %r19, %r36;\n\t" "cvt.rni.s32.f64 %r37, %r20;\n\t" "cvt.rm.f64.s32 %r20, %r37;\n\t" "cvt.rni.s32.f64 %r38, %r21;\n\t" "cvt.rm.f64.s32 %r21, %r38;\n\t" "cvt.rni.s32.f64 %r39, %r22;\n\t" "cvt.rm.f64.s32 %r22, %r39;\n\t" "cvt.rni.s32.f64 %r40, %r23;\n\t" "cvt.rm.f64.s32 %r23, %r40;\n\t" "cvt.rni.s32.f64 %r41, %r24;\n\t" "cvt.rm.f64.s32 %r24, %r41;\n\t" "cvt.rni.s32.f64 %r42, %r25;\n\t" "cvt.rm.f64.s32 %r25, %r42;\n\t" "cvt.rni.s32.f64 %r43, %r26;\n\t" "cvt.rm.f64.s32 %r26, %r43;\n\t" "cvt.rni.s32.f64 %r44, %r27;\n\t" "cvt.rm.f64.s32 %r27, %r44;\n\t" "cvt.rni.s32.f64 %r45, %r28;\n\t" "cvt.rm.f64.s32 %r28, %r45;\n\t" "cvt.rni.s32.f64 %r30, %r13;\n\t" "cvt.rm.f64.s32 %r13, %r30;\n\t" "cvt.rni.s32.f64 %r31, %r14;\n\t" "cvt.rm.f64.s32 %r14, %r31;\n\t" "cvt.rni.s32.f64 %r32, %r15;\n\t" "cvt.rm.f64.s32 %r15, %r32;\n\t" "cvt.rni.s32.f64 %r33, %r16;\n\t" "cvt.rm.f64.s32 %r16, %r33;\n\t" "cvt.rni.s32.f64 %r34, %r17;\n\t" "cvt.rm.f64.s32 %r17, %r34;\n\t" "cvt.rni.s32.f64 %r35, %r18;\n\t" "cvt.rm.f64.s32 %r18, %r35;\n\t" "cvt.rni.s32.f64 %r36, %r19;\n\t" "cvt.rm.f64.s32 %r19, %r36;\n\t" "cvt.rni.s32.f64 %r37, %r20;\n\t" "cvt.rm.f64.s32 %r20, %r37;\n\t" "cvt.rni.s32.f64 %r38, %r21;\n\t" "cvt.rm.f64.s32 %r21, %r38;\n\t" "cvt.rni.s32.f64 %r39, %r22;\n\t" "cvt.rm.f64.s32 %r22, %r39;\n\t" "cvt.rni.s32.f64 %r40, %r23;\n\t" "cvt.rm.f64.s32 %r23, %r40;\n\t" "cvt.rni.s32.f64 %r41, %r24;\n\t" "cvt.rm.f64.s32 %r24, %r41;\n\t" "cvt.rni.s32.f64 %r42, %r25;\n\t" "cvt.rm.f64.s32 %r25, %r42;\n\t" "cvt.rni.s32.f64 %r43, %r26;\n\t" "cvt.rm.f64.s32 %r26, %r43;\n\t" "cvt.rni.s32.f64 %r44, %r27;\n\t" "cvt.rm.f64.s32 %r27, %r44;\n\t" "cvt.rni.s32.f64 %r45, %r28;\n\t" "cvt.rm.f64.s32 %r28, %r45;\n\t" "cvt.rni.s32.f64 %r30, %r13;\n\t" "cvt.rm.f64.s32 %r13, %r30;\n\t" "cvt.rni.s32.f64 %r31, %r14;\n\t" "cvt.rm.f64.s32 %r14, %r31;\n\t" "cvt.rni.s32.f64 %r32, %r15;\n\t" "cvt.rm.f64.s32 %r15, %r32;\n\t" "cvt.rni.s32.f64 %r33, %r16;\n\t" "cvt.rm.f64.s32 %r16, %r33;\n\t" "cvt.rni.s32.f64 %r34, %r17;\n\t" "cvt.rm.f64.s32 %r17, %r34;\n\t" "cvt.rni.s32.f64 %r35, %r18;\n\t" "cvt.rm.f64.s32 %r18, %r35;\n\t" "cvt.rni.s32.f64 %r36, %r19;\n\t" "cvt.rm.f64.s32 %r19, %r36;\n\t" "cvt.rni.s32.f64 %r37, %r20;\n\t" "cvt.rm.f64.s32 %r20, %r37;\n\t" "cvt.rni.s32.f64 %r38, %r21;\n\t" "cvt.rm.f64.s32 %r21, %r38;\n\t" "cvt.rni.s32.f64 %r39, %r22;\n\t" "cvt.rm.f64.s32 %r22, %r39;\n\t" "cvt.rni.s32.f64 %r40, %r23;\n\t" "cvt.rm.f64.s32 %r23, %r40;\n\t" "cvt.rni.s32.f64 %r41, %r24;\n\t" "cvt.rm.f64.s32 %r24, %r41;\n\t" "cvt.rni.s32.f64 %r42, %r25;\n\t" "cvt.rm.f64.s32 %r25, %r42;\n\t" "cvt.rni.s32.f64 %r43, %r26;\n\t" "cvt.rm.f64.s32 %r26, %r43;\n\t" "cvt.rni.s32.f64 %r44, %r27;\n\t" "cvt.rm.f64.s32 %r27, %r44;\n\t" "cvt.rni.s32.f64 %r45, %r28;\n\t" "cvt.rm.f64.s32 %r28, %r45;\n\t" ); } } // __syncthreads(); // if ((blockDim.x * blockIdx.x + threadIdx.x) == 0) *D = I1; // __syncthreads(); } void usage() { std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl; } int main(int argc, char **argv) { if (argc != 5) { 
        usage();
        exit(1);
    }

    int num_blocks = atoi(argv[1]);
    int num_threads_per_block = atoi(argv[2]);
    int iterations = atoi(argv[3]);
    int divergence = atoi(argv[4]);

    // h_A = new float(2.0);
    // h_B = new float(3.0);
    // h_C = new float(4.0);

    // cudaMalloc((void**)&d_A, sizeof(float));
    // cudaMalloc((void**)&d_B, sizeof(float));
    // cudaMalloc((void**)&d_C, sizeof(float));
    cudaMalloc((void**)&d_res, sizeof(float));

    // h_res was never given backing storage before the final cudaMemcpy; allocate it.
    h_res = new float(0.0f);

    // cudaMemcpy(d_A, h_A, sizeof(float), cudaMemcpyHostToDevice);
    // cudaMemcpy(d_B, h_B, sizeof(float), cudaMemcpyHostToDevice);
    // cudaMemcpy(d_C, h_C, sizeof(float), cudaMemcpyHostToDevice);

    cudaEvent_t start, stop;
    float time;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    cudaProfilerStart();

    // compute<<<num_blocks, num_threads_per_block>>>(d_A, d_B, d_C, d_res, iterations);
    compute<<<num_blocks, num_threads_per_block>>>(d_res, iterations, divergence);
    cudaDeviceSynchronize();
    cudaProfilerStop();

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    std::cout << "GPU Elapsed Time = " << time << std::endl;
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    cudaMemcpy(h_res, d_res, sizeof(float), cudaMemcpyDeviceToHost);

    delete h_res;
    cudaFree(d_res);
    return 0;
}
c913b35c7e3654e78b2b98d4dd959d20f3b8ce77.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "constant_add_kernel.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];

            // Allocate in bytes: without sizeof(float) the buffers are 4x too small
            // for XSIZE*YSIZE float elements.
            float *data_l = NULL;
            hipMalloc(&data_l, XSIZE * YSIZE * sizeof(float));
            float constant = 1;
            float *result = NULL;
            hipMalloc(&result, XSIZE * YSIZE * sizeof(float));

            // Round the launch domain up to a multiple of the block dimensions.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);

            // Warm-up launches.
            hipFree(0);
            hipLaunchKernelGGL(( constant_add_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, data_l, constant, result);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( constant_add_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, data_l, constant, result);
            }
            hipDeviceSynchronize();

            // Timed launches. Synchronize before reading the clock so the measurement
            // covers kernel execution rather than just asynchronous launch overhead.
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( constant_add_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, data_l, constant, result);
            }
            hipDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;

            hipFree(data_l);
            hipFree(result);
        }
    }
}
c913b35c7e3654e78b2b98d4dd959d20f3b8ce77.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "constant_add_kernel.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];

            // Allocate in bytes: without sizeof(float) the buffers are 4x too small
            // for XSIZE*YSIZE float elements.
            float *data_l = NULL;
            cudaMalloc(&data_l, XSIZE * YSIZE * sizeof(float));
            float constant = 1;
            float *result = NULL;
            cudaMalloc(&result, XSIZE * YSIZE * sizeof(float));

            // Round the launch domain up to a multiple of the block dimensions.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);

            // Warm-up launches.
            cudaFree(0);
            constant_add_kernel<<<gridBlock,threadBlock>>>(data_l, constant, result);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                constant_add_kernel<<<gridBlock,threadBlock>>>(data_l, constant, result);
            }
            cudaDeviceSynchronize();

            // Timed launches. Synchronize before reading the clock so the measurement
            // covers kernel execution rather than just asynchronous launch overhead.
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                constant_add_kernel<<<gridBlock,threadBlock>>>(data_l, constant, result);
            }
            cudaDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;

            cudaFree(data_l);
            cudaFree(result);
        }
    }
}
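As an alternative to the host-side steady_clock timing used by this harness, CUDA events measure elapsed time on the device itself. The following is only a sketch (timeKernelMs is not part of the original file); it assumes the same constant_add_kernel and the gridBlock/threadBlock/data_l/constant/result values computed inside the loop above:

// Sketch: time a batch of kernel launches with CUDA events (device-side clock).
static float timeKernelMs(dim3 gridBlock, dim3 threadBlock,
                          float *data_l, float constant, float *result,
                          int repetitions) {
    cudaEvent_t evStart, evStop;
    cudaEventCreate(&evStart);
    cudaEventCreate(&evStop);
    cudaEventRecord(evStart);
    for (int i = 0; i < repetitions; i++) {
        constant_add_kernel<<<gridBlock, threadBlock>>>(data_l, constant, result);
    }
    cudaEventRecord(evStop);
    cudaEventSynchronize(evStop);           // wait for all timed launches to finish
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, evStart, evStop);  // elapsed device time in milliseconds
    cudaEventDestroy(evStart);
    cudaEventDestroy(evStop);
    return ms;
}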
c11559d1a3675983f7180b608146d9e23676e0c8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 1993-2015, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <thrust/device_ptr.h> #include <thrust/count.h> #include <thrust/execution_policy.h> #include <iostream> #include <helper_cuda.h> ///////////////////////////////////////////////////////////////// // Some utility code to define grid_stride_range // Normally this would be in a header but it's here // for didactic purposes. Uses #include "range.hpp" using namespace util::lang; // type alias to simplify typing... 
template<typename T> using step_range = typename range_proxy<T>::step_range_proxy; template <typename T> __device__ step_range<T> grid_stride_range(T begin, T end) { begin += blockDim.x * blockIdx.x + threadIdx.x; return range(begin, end).step(gridDim.x * blockDim.x); } ///////////////////////////////////////////////////////////////// template <typename T, typename Predicate> __device__ void count_if(int *count, T *data, int n, Predicate p) { for (auto i : grid_stride_range(0, n)) { if (p(data[i])) atomicAdd(count, 1); } } // Use count_if with a lambda function that searches for x, y, z or w // Note the use of range-based for loop and initializer_list inside the functor // We use auto so we don't have to know the type of the functor or array __global__ void xyzw_frequency(int *count, char *text, int n) { const char letters[] { 'x','y','z','w' }; count_if(count, text, n, [&](char c) { for (const auto x : letters) if (c == x) return true; return false; }); } __global__ void xyzw_frequency_thrust_device(int *count, char *text, int n) { const char letters[] { 'x','y','z','w' }; *count = thrust::count_if(thrust::device, text, text+n, [=](char c) { for (const auto x : letters) if (c == x) return true; return false; }); } // a bug in Thrust 1.8 causes warnings when this is uncommented // so commented out by default -- fixed in Thrust master branch #if 0 void xyzw_frequency_thrust_host(int *count, char *text, int n) { const char letters[] { 'x','y','z','w' }; *count = thrust::count_if(thrust::host, text, text+n, [&](char c) { for (const auto x : letters) if (c == x) return true; return false; }); } #endif int main(int argc, char** argv) { const char *filename = sdkFindFilePath("warandpeace.txt", argv[0]); int numBytes = 16*1048576; char *h_text = (char*)malloc(numBytes); // find first CUDA device int devID = findCudaDevice(argc, (const char **)argv); char *d_text; checkCudaErrors(hipMalloc((void**)&d_text, numBytes)); FILE *fp = fopen(filename, "r"); if (fp == NULL) { printf("Cannot find the input text file\n. Exiting..\n"); return EXIT_FAILURE; } int len = (int)fread(h_text, sizeof(char), numBytes, fp); fclose(fp); std::cout << "Read " << len << " byte corpus from " << filename << std::endl; checkCudaErrors(hipMemcpy(d_text, h_text, len, hipMemcpyHostToDevice)); int count = 0; int *d_count; checkCudaErrors(hipMalloc(&d_count, sizeof(int))); checkCudaErrors(hipMemset(d_count, 0, sizeof(int))); // Try uncommenting one kernel call at a time hipLaunchKernelGGL(( xyzw_frequency), dim3(8), dim3(256), 0, 0, d_count, d_text, len); hipLaunchKernelGGL(( xyzw_frequency_thrust_device), dim3(1), dim3(1), 0, 0, d_count, d_text, len); checkCudaErrors(hipMemcpy(&count, d_count, sizeof(int), hipMemcpyDeviceToHost)); //xyzw_frequency_thrust_host(&count, h_text, len); std::cout << "counted " << count << " instances of 'x', 'y', 'z', or 'w' in \"" << filename << "\"" << std::endl; checkCudaErrors(hipFree(d_count)); checkCudaErrors(hipFree(d_text)); return EXIT_SUCCESS; }
c11559d1a3675983f7180b608146d9e23676e0c8.cu
/* Copyright (c) 1993-2015, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <thrust/device_ptr.h> #include <thrust/count.h> #include <thrust/execution_policy.h> #include <iostream> #include <helper_cuda.h> ///////////////////////////////////////////////////////////////// // Some utility code to define grid_stride_range // Normally this would be in a header but it's here // for didactic purposes. Uses #include "range.hpp" using namespace util::lang; // type alias to simplify typing... 
template<typename T> using step_range = typename range_proxy<T>::step_range_proxy; template <typename T> __device__ step_range<T> grid_stride_range(T begin, T end) { begin += blockDim.x * blockIdx.x + threadIdx.x; return range(begin, end).step(gridDim.x * blockDim.x); } ///////////////////////////////////////////////////////////////// template <typename T, typename Predicate> __device__ void count_if(int *count, T *data, int n, Predicate p) { for (auto i : grid_stride_range(0, n)) { if (p(data[i])) atomicAdd(count, 1); } } // Use count_if with a lambda function that searches for x, y, z or w // Note the use of range-based for loop and initializer_list inside the functor // We use auto so we don't have to know the type of the functor or array __global__ void xyzw_frequency(int *count, char *text, int n) { const char letters[] { 'x','y','z','w' }; count_if(count, text, n, [&](char c) { for (const auto x : letters) if (c == x) return true; return false; }); } __global__ void xyzw_frequency_thrust_device(int *count, char *text, int n) { const char letters[] { 'x','y','z','w' }; *count = thrust::count_if(thrust::device, text, text+n, [=](char c) { for (const auto x : letters) if (c == x) return true; return false; }); } // a bug in Thrust 1.8 causes warnings when this is uncommented // so commented out by default -- fixed in Thrust master branch #if 0 void xyzw_frequency_thrust_host(int *count, char *text, int n) { const char letters[] { 'x','y','z','w' }; *count = thrust::count_if(thrust::host, text, text+n, [&](char c) { for (const auto x : letters) if (c == x) return true; return false; }); } #endif int main(int argc, char** argv) { const char *filename = sdkFindFilePath("warandpeace.txt", argv[0]); int numBytes = 16*1048576; char *h_text = (char*)malloc(numBytes); // find first CUDA device int devID = findCudaDevice(argc, (const char **)argv); char *d_text; checkCudaErrors(cudaMalloc((void**)&d_text, numBytes)); FILE *fp = fopen(filename, "r"); if (fp == NULL) { printf("Cannot find the input text file\n. Exiting..\n"); return EXIT_FAILURE; } int len = (int)fread(h_text, sizeof(char), numBytes, fp); fclose(fp); std::cout << "Read " << len << " byte corpus from " << filename << std::endl; checkCudaErrors(cudaMemcpy(d_text, h_text, len, cudaMemcpyHostToDevice)); int count = 0; int *d_count; checkCudaErrors(cudaMalloc(&d_count, sizeof(int))); checkCudaErrors(cudaMemset(d_count, 0, sizeof(int))); // Try uncommenting one kernel call at a time xyzw_frequency<<<8, 256>>>(d_count, d_text, len); xyzw_frequency_thrust_device<<<1, 1>>>(d_count, d_text, len); checkCudaErrors(cudaMemcpy(&count, d_count, sizeof(int), cudaMemcpyDeviceToHost)); //xyzw_frequency_thrust_host(&count, h_text, len); std::cout << "counted " << count << " instances of 'x', 'y', 'z', or 'w' in \"" << filename << "\"" << std::endl; checkCudaErrors(cudaFree(d_count)); checkCudaErrors(cudaFree(d_text)); return EXIT_SUCCESS; }
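The device lambdas passed to count_if and thrust::count_if above generally require nvcc's extended-lambda switch (--extended-lambda, spelled --expt-extended-lambda on older toolkits). Where that option is unavailable, a plain functor expresses the same predicate; a minimal sketch (IsXYZW is not part of the original sample):

// Hypothetical functor equivalent of the [&]/[=] device lambdas used above.
struct IsXYZW {
    __host__ __device__
    bool operator()(char c) const {
        return c == 'x' || c == 'y' || c == 'z' || c == 'w';
    }
};

// It would be passed wherever the lambdas are passed today, e.g.:
//   count_if(count, text, n, IsXYZW());
//   *count = thrust::count_if(thrust::device, text, text + n, IsXYZW());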
103910f32122fa4ea6966febfea1904ffd9228b3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @generated from zlarf.cu normal z -> s, Fri Sep 11 18:29:20 2015 @author Azzam Haidar */ #include "common_magma.h" #include "magma_templates.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 #define BLOCK_SIZEx 32 #define BLOCK_SIZEy 16 //============================================================================== //============================================================================== __global__ void magma_slarf_kernel( int m, const float *dv, const float *dtau, float *dc, int lddc ) { if ( !MAGMA_S_EQUAL(*dtau, MAGMA_S_ZERO) ) { const int tx = threadIdx.x; dc = dc + blockIdx.x * lddc; __shared__ float sum[ BLOCK_SIZE ]; float tmp; /* perform w := v**H * C */ if (tx == 0) tmp = dc[0]; //since V[0] should be one else tmp = MAGMA_S_ZERO; for( int j = tx+1; j < m; j += BLOCK_SIZE ) { tmp += MAGMA_S_MUL( MAGMA_S_CNJG( dv[j] ), dc[j] ); } sum[tx] = tmp; magma_sum_reduce< BLOCK_SIZE >( tx, sum ); /* C := C - v * w */ __syncthreads(); tmp = - MAGMA_S_CNJG(*dtau) * sum[0]; for( int j = m-tx-1; j > 0; j -= BLOCK_SIZE ) dc[j] += tmp * dv[j]; if (tx == 0) dc[0] += tmp; } } //============================================================================== //============================================================================== __global__ void magma_slarf_smkernel( int m, int n, float *dv, float *dtau, float *dc, int lddc ) { if ( ! MAGMA_S_EQUAL(*dtau, MAGMA_S_ZERO) ) { const int i = threadIdx.x, col= threadIdx.y; for( int k = col; k < n; k += BLOCK_SIZEy ) { dc = dc + k * lddc; __shared__ float sum[ BLOCK_SIZEx ][ BLOCK_SIZEy + 1]; float lsum; /* w := v**H * C */ lsum = MAGMA_S_ZERO; for( int j = i; j < m; j += BLOCK_SIZEx ) { if (j == 0) lsum += MAGMA_S_MUL( MAGMA_S_ONE, dc[j] ); else lsum += MAGMA_S_MUL( MAGMA_S_CNJG( dv[j] ), dc[j] ); } sum[i][col] = lsum; magma_sum_reduce_2d< BLOCK_SIZEx, BLOCK_SIZEy+1 >( i, col, sum ); /* C := C - v * w */ __syncthreads(); float z__1 = - MAGMA_S_CNJG(*dtau) * sum[0][col]; for( int j = m-i-1; j >= 0; j -= BLOCK_SIZEx ) { if (j == 0) dc[j] += z__1; else dc[j] += z__1 * dv[j]; } } } } //============================================================================== /* Apply a real elementary reflector H to a real M-by-N matrix C from the left. H is represented in the form H = I - tau * v * v**H where tau is a real scalar and v is a real vector. If tau = 0, then H is taken to be the unit matrix. To apply H**H (the conjugate transpose of H), supply conjg(tau) instead tau. This routine uses only one SM (block). */ extern "C" void magma_slarf_sm(magma_int_t m, magma_int_t n, float *dv, float *dtau, float *dc, magma_int_t lddc) { dim3 blocks( 1 ); dim3 threads( BLOCK_SIZEx, BLOCK_SIZEy ); hipLaunchKernelGGL(( magma_slarf_smkernel), dim3(blocks), dim3(threads), 0, magma_stream , m, n, dv, dtau, dc, lddc ); } //============================================================================== /* Apply a real elementary reflector H to a real M-by-N matrix C from the left. H is represented in the form H = I - tau * v * v**H where tau is a real scalar and v is a real vector. If tau = 0, then H is taken to be the unit matrix. To apply H**H (the conjugate transpose of H), supply conjg(tau) instead tau. 
*/ extern "C" magma_int_t magma_slarf_gpu( magma_int_t m, magma_int_t n, magmaFloat_const_ptr dv, magmaFloat_const_ptr dtau, magmaFloat_ptr dC, magma_int_t lddc) { dim3 grid( n, 1, 1 ); dim3 threads( BLOCK_SIZE ); if ( n > 0 ) { hipLaunchKernelGGL(( magma_slarf_kernel), dim3(grid), dim3(threads), 0, magma_stream , m, dv, dtau, dC, lddc); } // The computation can be done on 1 SM with the following routine. // magma_slarf_sm(m, n, dv, dtau, dc, lddc); return MAGMA_SUCCESS; } //==============================================================================
103910f32122fa4ea6966febfea1904ffd9228b3.cu
/* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @generated from zlarf.cu normal z -> s, Fri Sep 11 18:29:20 2015 @author Azzam Haidar */ #include "common_magma.h" #include "magma_templates.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 #define BLOCK_SIZEx 32 #define BLOCK_SIZEy 16 //============================================================================== //============================================================================== __global__ void magma_slarf_kernel( int m, const float *dv, const float *dtau, float *dc, int lddc ) { if ( !MAGMA_S_EQUAL(*dtau, MAGMA_S_ZERO) ) { const int tx = threadIdx.x; dc = dc + blockIdx.x * lddc; __shared__ float sum[ BLOCK_SIZE ]; float tmp; /* perform w := v**H * C */ if (tx == 0) tmp = dc[0]; //since V[0] should be one else tmp = MAGMA_S_ZERO; for( int j = tx+1; j < m; j += BLOCK_SIZE ) { tmp += MAGMA_S_MUL( MAGMA_S_CNJG( dv[j] ), dc[j] ); } sum[tx] = tmp; magma_sum_reduce< BLOCK_SIZE >( tx, sum ); /* C := C - v * w */ __syncthreads(); tmp = - MAGMA_S_CNJG(*dtau) * sum[0]; for( int j = m-tx-1; j > 0; j -= BLOCK_SIZE ) dc[j] += tmp * dv[j]; if (tx == 0) dc[0] += tmp; } } //============================================================================== //============================================================================== __global__ void magma_slarf_smkernel( int m, int n, float *dv, float *dtau, float *dc, int lddc ) { if ( ! MAGMA_S_EQUAL(*dtau, MAGMA_S_ZERO) ) { const int i = threadIdx.x, col= threadIdx.y; for( int k = col; k < n; k += BLOCK_SIZEy ) { dc = dc + k * lddc; __shared__ float sum[ BLOCK_SIZEx ][ BLOCK_SIZEy + 1]; float lsum; /* w := v**H * C */ lsum = MAGMA_S_ZERO; for( int j = i; j < m; j += BLOCK_SIZEx ) { if (j == 0) lsum += MAGMA_S_MUL( MAGMA_S_ONE, dc[j] ); else lsum += MAGMA_S_MUL( MAGMA_S_CNJG( dv[j] ), dc[j] ); } sum[i][col] = lsum; magma_sum_reduce_2d< BLOCK_SIZEx, BLOCK_SIZEy+1 >( i, col, sum ); /* C := C - v * w */ __syncthreads(); float z__1 = - MAGMA_S_CNJG(*dtau) * sum[0][col]; for( int j = m-i-1; j >= 0; j -= BLOCK_SIZEx ) { if (j == 0) dc[j] += z__1; else dc[j] += z__1 * dv[j]; } } } } //============================================================================== /* Apply a real elementary reflector H to a real M-by-N matrix C from the left. H is represented in the form H = I - tau * v * v**H where tau is a real scalar and v is a real vector. If tau = 0, then H is taken to be the unit matrix. To apply H**H (the conjugate transpose of H), supply conjg(tau) instead tau. This routine uses only one SM (block). */ extern "C" void magma_slarf_sm(magma_int_t m, magma_int_t n, float *dv, float *dtau, float *dc, magma_int_t lddc) { dim3 blocks( 1 ); dim3 threads( BLOCK_SIZEx, BLOCK_SIZEy ); magma_slarf_smkernel<<< blocks, threads, 0, magma_stream >>>( m, n, dv, dtau, dc, lddc ); } //============================================================================== /* Apply a real elementary reflector H to a real M-by-N matrix C from the left. H is represented in the form H = I - tau * v * v**H where tau is a real scalar and v is a real vector. If tau = 0, then H is taken to be the unit matrix. To apply H**H (the conjugate transpose of H), supply conjg(tau) instead tau. 
*/ extern "C" magma_int_t magma_slarf_gpu( magma_int_t m, magma_int_t n, magmaFloat_const_ptr dv, magmaFloat_const_ptr dtau, magmaFloat_ptr dC, magma_int_t lddc) { dim3 grid( n, 1, 1 ); dim3 threads( BLOCK_SIZE ); if ( n > 0 ) { magma_slarf_kernel<<< grid, threads, 0, magma_stream >>>( m, dv, dtau, dC, lddc); } // The computation can be done on 1 SM with the following routine. // magma_slarf_sm(m, n, dv, dtau, dc, lddc); return MAGMA_SUCCESS; } //==============================================================================
77c463ff42ef410da08809ba150a7dd8c3d84a8d.hip
// !!! This is a file automatically generated by hipify!!! #include <cmath> #include <cstdio> #include <ctime> #include <iostream> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> __global__ void sum_shared_mem(float *array) { int idx = threadIdx.x; float sum=0.0f; // Share among threads within the same block __shared__ float sh_array[1024]; sh_array[idx] = array[idx]; // Syncronize threads within the same block __syncthreads(); for (int i=0; i<=idx; i++){ sum+= sh_array[i]; } __syncthreads(); array[idx] = sum; } __global__ void sum_global_mem(float *array) { int idx = threadIdx.x; float sum=0.0f; for (int i=0; i<=idx; i++){ sum+= array[i]; } __syncthreads(); array[idx] = sum; } int main(void) { std::clock_t start_time; double duration01; double duration02; double duration03; const int ARR_BYTES = 1024*sizeof(float); // Clock start start_time = std::clock(); // Declare and alloc array on host float h_array[1024]; // initialize input array for (int i=0; i<1024; i++){ h_array[i] = float(i); } // Declare and alloc array on device float *d_array; hipMalloc(&d_array, ARR_BYTES); // Transfer to device hipMemcpy(d_array, h_array, ARR_BYTES, hipMemcpyHostToDevice); // Clock stop 01 duration01 = ( std::clock() - start_time ) / (double) CLOCKS_PER_SEC; std::cout<<"Computing time before Kernel call: "<< duration01 << "s" << std::endl; // Call kernel function with shared memory hipLaunchKernelGGL(( sum_shared_mem), dim3(1), dim3(1024), 0, 0, d_array); // Call kernel function with shared memory // sum_global_mem<<<1, 1024>>>(d_array); // Clock stop 02 duration02 = ( std::clock() - start_time ) / (double) CLOCKS_PER_SEC; std::cout<<"Computing time after Kernel call: "<< duration02 << "s" << std::endl; // Transfer results to host hipMemcpy(h_array, d_array, ARR_BYTES, hipMemcpyDeviceToHost); // Clock stop 03 duration03 = ( std::clock() - start_time ) / (double) CLOCKS_PER_SEC; std::cout<<"Computing time after memory copy: "<< duration03 << "s" << std::endl; // Output results for(int ii=0; ii<10; ii++){ std::cout<< h_array[ii]<< ", "; } std::cout<< std::endl; return 0; }
77c463ff42ef410da08809ba150a7dd8c3d84a8d.cu
#include <cmath> #include <cstdio> #include <ctime> #include <iostream> #include <cuda.h> #include <cuda_runtime.h> __global__ void sum_shared_mem(float *array) { int idx = threadIdx.x; float sum=0.0f; // Share among threads within the same block __shared__ float sh_array[1024]; sh_array[idx] = array[idx]; // Syncronize threads within the same block __syncthreads(); for (int i=0; i<=idx; i++){ sum+= sh_array[i]; } __syncthreads(); array[idx] = sum; } __global__ void sum_global_mem(float *array) { int idx = threadIdx.x; float sum=0.0f; for (int i=0; i<=idx; i++){ sum+= array[i]; } __syncthreads(); array[idx] = sum; } int main(void) { std::clock_t start_time; double duration01; double duration02; double duration03; const int ARR_BYTES = 1024*sizeof(float); // Clock start start_time = std::clock(); // Declare and alloc array on host float h_array[1024]; // initialize input array for (int i=0; i<1024; i++){ h_array[i] = float(i); } // Declare and alloc array on device float *d_array; cudaMalloc(&d_array, ARR_BYTES); // Transfer to device cudaMemcpy(d_array, h_array, ARR_BYTES, cudaMemcpyHostToDevice); // Clock stop 01 duration01 = ( std::clock() - start_time ) / (double) CLOCKS_PER_SEC; std::cout<<"Computing time before Kernel call: "<< duration01 << "s" << std::endl; // Call kernel function with shared memory sum_shared_mem<<<1, 1024>>>(d_array); // Call kernel function with shared memory // sum_global_mem<<<1, 1024>>>(d_array); // Clock stop 02 duration02 = ( std::clock() - start_time ) / (double) CLOCKS_PER_SEC; std::cout<<"Computing time after Kernel call: "<< duration02 << "s" << std::endl; // Transfer results to host cudaMemcpy(h_array, d_array, ARR_BYTES, cudaMemcpyDeviceToHost); // Clock stop 03 duration03 = ( std::clock() - start_time ) / (double) CLOCKS_PER_SEC; std::cout<<"Computing time after memory copy: "<< duration03 << "s" << std::endl; // Output results for(int ii=0; ii<10; ii++){ std::cout<< h_array[ii]<< ", "; } std::cout<< std::endl; return 0; }
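// A small host-side check for the program above (illustrative only, not part of the
// original file): sum_shared_mem computes an inclusive prefix sum, so out[i] should
// equal in[0] + ... + in[i]. This assumes the original inputs are kept in a separate
// array before h_array is overwritten by the copy back from the device.
static bool verify_prefix_sum(const float *in, const float *out, int n)
{
    float running = 0.0f;
    for (int i = 0; i < n; ++i) {
        running += in[i];
        // tolerate small differences caused by float accumulation order
        if (std::fabs(out[i] - running) > 1e-2f * std::fabs(running) + 1e-3f)
            return false;
    }
    return true;
}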
ef173e031412341b8fdda0ea1e33ca05d6002dc4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "internal_shared.hpp" #include "opencv2/gpu/device/saturate_cast.hpp" #include "opencv2/gpu/device/limits.hpp" namespace cv { namespace gpu { namespace device { namespace stereobp { /////////////////////////////////////////////////////////////// /////////////////////// load constants //////////////////////// /////////////////////////////////////////////////////////////// __constant__ int cndisp; __constant__ float cmax_data_term; __constant__ float cdata_weight; __constant__ float cmax_disc_term; __constant__ float cdisc_single_jump; void load_constants(int ndisp, float max_data_term, float data_weight, float max_disc_term, float disc_single_jump) { cudaSafeCall( hipMemcpyToSymbol(cndisp, &ndisp, sizeof(int )) ); cudaSafeCall( hipMemcpyToSymbol(cmax_data_term, &max_data_term, sizeof(float)) ); cudaSafeCall( hipMemcpyToSymbol(cdata_weight, &data_weight, sizeof(float)) ); cudaSafeCall( hipMemcpyToSymbol(cmax_disc_term, &max_disc_term, sizeof(float)) ); cudaSafeCall( hipMemcpyToSymbol(cdisc_single_jump, &disc_single_jump, sizeof(float)) ); } /////////////////////////////////////////////////////////////// ////////////////////////// comp data ////////////////////////// /////////////////////////////////////////////////////////////// template <int cn> struct PixDiff; template <> struct PixDiff<1> { __device__ __forceinline__ PixDiff(const uchar* ls) { l = *ls; } __device__ __forceinline__ float operator()(const uchar* rs) const { return ::abs((int)l - *rs); } uchar l; }; template <> struct PixDiff<3> { __device__ __forceinline__ PixDiff(const uchar* ls) { l = *((uchar3*)ls); } __device__ __forceinline__ float operator()(const uchar* rs) const { const float tr = 0.299f; const float tg = 0.587f; const float tb = 0.114f; float val = tb * ::abs((int)l.x - rs[0]); val += tg * ::abs((int)l.y - rs[1]); val += tr * ::abs((int)l.z - rs[2]); return val; } uchar3 l; }; template <> struct PixDiff<4> { __device__ __forceinline__ PixDiff(const uchar* ls) { l = *((uchar4*)ls); } __device__ __forceinline__ float operator()(const uchar* rs) const { const float tr = 0.299f; const float tg = 0.587f; const float tb = 0.114f; uchar4 r = *((uchar4*)rs); float val = tb * ::abs((int)l.x - r.x); val += tg * ::abs((int)l.y - r.y); val += tr * ::abs((int)l.z - r.z); return val; } uchar4 l; }; template <int cn, typename D> __global__ void comp_data(const PtrStepSzb left, const PtrStepb right, PtrStep<D> data) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (y > 0 && y < left.rows - 1 && x > 0 && x < left.cols - 1) { const uchar* ls = left.ptr(y) + x * cn; const PixDiff<cn> pixDiff(ls); const uchar* rs = right.ptr(y) + x * cn; D* ds = data.ptr(y) + x; const size_t disp_step = data.step * left.rows / sizeof(D); for (int disp = 0; disp < cndisp; disp++) { if (x - disp >= 1) { float val = pixDiff(rs - disp * cn); ds[disp * disp_step] = saturate_cast<D>(fmin(cdata_weight * val, cdata_weight * cmax_data_term)); } else { ds[disp * disp_step] = saturate_cast<D>(cdata_weight * cmax_data_term); } } } } template<typename T, typename D> void comp_data_gpu(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& data, hipStream_t stream); template <> void comp_data_gpu<uchar, short>(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& data, hipStream_t stream) { dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(left.cols, threads.x); grid.y = divUp(left.rows, threads.y); hipLaunchKernelGGL(( 
comp_data<1, short>), dim3(grid), dim3(threads), 0, stream, left, right, (PtrStepSz<short>)data); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } template <> void comp_data_gpu<uchar, float>(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& data, hipStream_t stream) { dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(left.cols, threads.x); grid.y = divUp(left.rows, threads.y); hipLaunchKernelGGL(( comp_data<1, float>), dim3(grid), dim3(threads), 0, stream, left, right, (PtrStepSz<float>)data); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } template <> void comp_data_gpu<uchar3, short>(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& data, hipStream_t stream) { dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(left.cols, threads.x); grid.y = divUp(left.rows, threads.y); hipLaunchKernelGGL(( comp_data<3, short>), dim3(grid), dim3(threads), 0, stream, left, right, (PtrStepSz<short>)data); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } template <> void comp_data_gpu<uchar3, float>(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& data, hipStream_t stream) { dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(left.cols, threads.x); grid.y = divUp(left.rows, threads.y); hipLaunchKernelGGL(( comp_data<3, float>), dim3(grid), dim3(threads), 0, stream, left, right, (PtrStepSz<float>)data); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } template <> void comp_data_gpu<uchar4, short>(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& data, hipStream_t stream) { dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(left.cols, threads.x); grid.y = divUp(left.rows, threads.y); hipLaunchKernelGGL(( comp_data<4, short>), dim3(grid), dim3(threads), 0, stream, left, right, (PtrStepSz<short>)data); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } template <> void comp_data_gpu<uchar4, float>(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& data, hipStream_t stream) { dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(left.cols, threads.x); grid.y = divUp(left.rows, threads.y); hipLaunchKernelGGL(( comp_data<4, float>), dim3(grid), dim3(threads), 0, stream, left, right, (PtrStepSz<float>)data); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } /////////////////////////////////////////////////////////////// //////////////////////// data step down /////////////////////// /////////////////////////////////////////////////////////////// template <typename T> __global__ void data_step_down(int dst_cols, int dst_rows, int src_rows, const PtrStep<T> src, PtrStep<T> dst) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < dst_cols && y < dst_rows) { for (int d = 0; d < cndisp; ++d) { float dst_reg = src.ptr(d * src_rows + (2*y+0))[(2*x+0)]; dst_reg += src.ptr(d * src_rows + (2*y+1))[(2*x+0)]; dst_reg += src.ptr(d * src_rows + (2*y+0))[(2*x+1)]; dst_reg += src.ptr(d * src_rows + (2*y+1))[(2*x+1)]; dst.ptr(d * dst_rows + y)[x] = saturate_cast<T>(dst_reg); } } } template<typename T> void data_step_down_gpu(int dst_cols, int dst_rows, int src_rows, const PtrStepSzb& src, const PtrStepSzb& dst, hipStream_t stream) { dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = 
divUp(dst_cols, threads.x); grid.y = divUp(dst_rows, threads.y); hipLaunchKernelGGL(( data_step_down<T>), dim3(grid), dim3(threads), 0, stream, dst_cols, dst_rows, src_rows, (PtrStepSz<T>)src, (PtrStepSz<T>)dst); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } template void data_step_down_gpu<short>(int dst_cols, int dst_rows, int src_rows, const PtrStepSzb& src, const PtrStepSzb& dst, hipStream_t stream); template void data_step_down_gpu<float>(int dst_cols, int dst_rows, int src_rows, const PtrStepSzb& src, const PtrStepSzb& dst, hipStream_t stream); /////////////////////////////////////////////////////////////// /////////////////// level up messages //////////////////////// /////////////////////////////////////////////////////////////// template <typename T> __global__ void level_up_message(int dst_cols, int dst_rows, int src_rows, const PtrStep<T> src, PtrStep<T> dst) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < dst_cols && y < dst_rows) { const size_t dst_disp_step = dst.step * dst_rows / sizeof(T); const size_t src_disp_step = src.step * src_rows / sizeof(T); T* dstr = dst.ptr(y ) + x; const T* srcr = src.ptr(y/2) + x/2; for (int d = 0; d < cndisp; ++d) dstr[d * dst_disp_step] = srcr[d * src_disp_step]; } } template <typename T> void level_up_messages_gpu(int dst_idx, int dst_cols, int dst_rows, int src_rows, PtrStepSzb* mus, PtrStepSzb* mds, PtrStepSzb* mls, PtrStepSzb* mrs, hipStream_t stream) { dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(dst_cols, threads.x); grid.y = divUp(dst_rows, threads.y); int src_idx = (dst_idx + 1) & 1; hipLaunchKernelGGL(( level_up_message<T>), dim3(grid), dim3(threads), 0, stream, dst_cols, dst_rows, src_rows, (PtrStepSz<T>)mus[src_idx], (PtrStepSz<T>)mus[dst_idx]); cudaSafeCall( hipGetLastError() ); hipLaunchKernelGGL(( level_up_message<T>), dim3(grid), dim3(threads), 0, stream, dst_cols, dst_rows, src_rows, (PtrStepSz<T>)mds[src_idx], (PtrStepSz<T>)mds[dst_idx]); cudaSafeCall( hipGetLastError() ); hipLaunchKernelGGL(( level_up_message<T>), dim3(grid), dim3(threads), 0, stream, dst_cols, dst_rows, src_rows, (PtrStepSz<T>)mls[src_idx], (PtrStepSz<T>)mls[dst_idx]); cudaSafeCall( hipGetLastError() ); hipLaunchKernelGGL(( level_up_message<T>), dim3(grid), dim3(threads), 0, stream, dst_cols, dst_rows, src_rows, (PtrStepSz<T>)mrs[src_idx], (PtrStepSz<T>)mrs[dst_idx]); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } template void level_up_messages_gpu<short>(int dst_idx, int dst_cols, int dst_rows, int src_rows, PtrStepSzb* mus, PtrStepSzb* mds, PtrStepSzb* mls, PtrStepSzb* mrs, hipStream_t stream); template void level_up_messages_gpu<float>(int dst_idx, int dst_cols, int dst_rows, int src_rows, PtrStepSzb* mus, PtrStepSzb* mds, PtrStepSzb* mls, PtrStepSzb* mrs, hipStream_t stream); /////////////////////////////////////////////////////////////// //////////////////// calc all iterations ///////////////////// /////////////////////////////////////////////////////////////// template <typename T> __device__ void calc_min_linear_penalty(T* dst, size_t step) { float prev = dst[0]; float cur; for (int disp = 1; disp < cndisp; ++disp) { prev += cdisc_single_jump; cur = dst[step * disp]; if (prev < cur) { cur = prev; dst[step * disp] = saturate_cast<T>(prev); } prev = cur; } prev = dst[(cndisp - 1) * step]; for (int disp = cndisp - 2; disp >= 0; disp--) { prev += cdisc_single_jump; cur = 
dst[step * disp]; if (prev < cur) { cur = prev; dst[step * disp] = saturate_cast<T>(prev); } prev = cur; } } template <typename T> __device__ void message(const T* msg1, const T* msg2, const T* msg3, const T* data, T* dst, size_t msg_disp_step, size_t data_disp_step) { float minimum = device::numeric_limits<float>::max(); for(int i = 0; i < cndisp; ++i) { float dst_reg = msg1[msg_disp_step * i]; dst_reg += msg2[msg_disp_step * i]; dst_reg += msg3[msg_disp_step * i]; dst_reg += data[data_disp_step * i]; if (dst_reg < minimum) minimum = dst_reg; dst[msg_disp_step * i] = saturate_cast<T>(dst_reg); } calc_min_linear_penalty(dst, msg_disp_step); minimum += cmax_disc_term; float sum = 0; for(int i = 0; i < cndisp; ++i) { float dst_reg = dst[msg_disp_step * i]; if (dst_reg > minimum) { dst_reg = minimum; dst[msg_disp_step * i] = saturate_cast<T>(minimum); } sum += dst_reg; } sum /= cndisp; for(int i = 0; i < cndisp; ++i) dst[msg_disp_step * i] -= sum; } template <typename T> __global__ void one_iteration(int t, int elem_step, T* u, T* d, T* l, T* r, const PtrStep<T> data, int cols, int rows) { const int y = blockIdx.y * blockDim.y + threadIdx.y; const int x = ((blockIdx.x * blockDim.x + threadIdx.x) << 1) + ((y + t) & 1); if ((y > 0) && (y < rows - 1) && (x > 0) && (x < cols - 1)) { T* us = u + y * elem_step + x; T* ds = d + y * elem_step + x; T* ls = l + y * elem_step + x; T* rs = r + y * elem_step + x; const T* dt = data.ptr(y) + x; size_t msg_disp_step = elem_step * rows; size_t data_disp_step = data.step * rows / sizeof(T); message(us + elem_step, ls + 1, rs - 1, dt, us, msg_disp_step, data_disp_step); message(ds - elem_step, ls + 1, rs - 1, dt, ds, msg_disp_step, data_disp_step); message(us + elem_step, ds - elem_step, rs - 1, dt, rs, msg_disp_step, data_disp_step); message(us + elem_step, ds - elem_step, ls + 1, dt, ls, msg_disp_step, data_disp_step); } } template <typename T> void calc_all_iterations_gpu(int cols, int rows, int iters, const PtrStepSzb& u, const PtrStepSzb& d, const PtrStepSzb& l, const PtrStepSzb& r, const PtrStepSzb& data, hipStream_t stream) { dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(cols, threads.x << 1); grid.y = divUp(rows, threads.y); int elem_step = (int)(u.step / sizeof(T)); for(int t = 0; t < iters; ++t) { hipLaunchKernelGGL(( one_iteration<T>), dim3(grid), dim3(threads), 0, stream, t, elem_step, (T*)u.data, (T*)d.data, (T*)l.data, (T*)r.data, (PtrStepSz<T>)data, cols, rows); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } } template void calc_all_iterations_gpu<short>(int cols, int rows, int iters, const PtrStepSzb& u, const PtrStepSzb& d, const PtrStepSzb& l, const PtrStepSzb& r, const PtrStepSzb& data, hipStream_t stream); template void calc_all_iterations_gpu<float>(int cols, int rows, int iters, const PtrStepSzb& u, const PtrStepSzb& d, const PtrStepSzb& l, const PtrStepSzb& r, const PtrStepSzb& data, hipStream_t stream); /////////////////////////////////////////////////////////////// /////////////////////////// output //////////////////////////// /////////////////////////////////////////////////////////////// template <typename T> __global__ void output(const int elem_step, const T* u, const T* d, const T* l, const T* r, const T* data, PtrStepSz<short> disp) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (y > 0 && y < disp.rows - 1 && x > 0 && x < disp.cols - 1) { const T* us = u + (y + 1) * elem_step + x; const T* ds = d + (y - 1) 
* elem_step + x; const T* ls = l + y * elem_step + (x + 1); const T* rs = r + y * elem_step+ (x - 1); const T* dt = data + y * elem_step + x; size_t disp_step = disp.rows * elem_step; int best = 0; float best_val = numeric_limits<float>::max(); for (int d = 0; d < cndisp; ++d) { float val = us[d * disp_step]; val += ds[d * disp_step]; val += ls[d * disp_step]; val += rs[d * disp_step]; val += dt[d * disp_step]; if (val < best_val) { best_val = val; best = d; } } disp.ptr(y)[x] = saturate_cast<short>(best); } } template <typename T> void output_gpu(const PtrStepSzb& u, const PtrStepSzb& d, const PtrStepSzb& l, const PtrStepSzb& r, const PtrStepSzb& data, const PtrStepSz<short>& disp, hipStream_t stream) { dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(disp.cols, threads.x); grid.y = divUp(disp.rows, threads.y); int elem_step = static_cast<int>(u.step/sizeof(T)); hipLaunchKernelGGL(( output<T>), dim3(grid), dim3(threads), 0, stream, elem_step, (const T*)u.data, (const T*)d.data, (const T*)l.data, (const T*)r.data, (const T*)data.data, disp); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } template void output_gpu<short>(const PtrStepSzb& u, const PtrStepSzb& d, const PtrStepSzb& l, const PtrStepSzb& r, const PtrStepSzb& data, const PtrStepSz<short>& disp, hipStream_t stream); template void output_gpu<float>(const PtrStepSzb& u, const PtrStepSzb& d, const PtrStepSzb& l, const PtrStepSzb& r, const PtrStepSzb& data, const PtrStepSz<short>& disp, hipStream_t stream); } // namespace stereobp }}} // namespace cv { namespace gpu { namespace device #endif /* CUDA_DISABLER */
ef173e031412341b8fdda0ea1e33ca05d6002dc4.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "internal_shared.hpp" #include "opencv2/gpu/device/saturate_cast.hpp" #include "opencv2/gpu/device/limits.hpp" namespace cv { namespace gpu { namespace device { namespace stereobp { /////////////////////////////////////////////////////////////// /////////////////////// load constants //////////////////////// /////////////////////////////////////////////////////////////// __constant__ int cndisp; __constant__ float cmax_data_term; __constant__ float cdata_weight; __constant__ float cmax_disc_term; __constant__ float cdisc_single_jump; void load_constants(int ndisp, float max_data_term, float data_weight, float max_disc_term, float disc_single_jump) { cudaSafeCall( cudaMemcpyToSymbol(cndisp, &ndisp, sizeof(int )) ); cudaSafeCall( cudaMemcpyToSymbol(cmax_data_term, &max_data_term, sizeof(float)) ); cudaSafeCall( cudaMemcpyToSymbol(cdata_weight, &data_weight, sizeof(float)) ); cudaSafeCall( cudaMemcpyToSymbol(cmax_disc_term, &max_disc_term, sizeof(float)) ); cudaSafeCall( cudaMemcpyToSymbol(cdisc_single_jump, &disc_single_jump, sizeof(float)) ); } /////////////////////////////////////////////////////////////// ////////////////////////// comp data ////////////////////////// /////////////////////////////////////////////////////////////// template <int cn> struct PixDiff; template <> struct PixDiff<1> { __device__ __forceinline__ PixDiff(const uchar* ls) { l = *ls; } __device__ __forceinline__ float operator()(const uchar* rs) const { return ::abs((int)l - *rs); } uchar l; }; template <> struct PixDiff<3> { __device__ __forceinline__ PixDiff(const uchar* ls) { l = *((uchar3*)ls); } __device__ __forceinline__ float operator()(const uchar* rs) const { const float tr = 0.299f; const float tg = 0.587f; const float tb = 0.114f; float val = tb * ::abs((int)l.x - rs[0]); val += tg * ::abs((int)l.y - rs[1]); val += tr * ::abs((int)l.z - rs[2]); return val; } uchar3 l; }; template <> struct PixDiff<4> { __device__ __forceinline__ PixDiff(const uchar* ls) { l = *((uchar4*)ls); } __device__ __forceinline__ float operator()(const uchar* rs) const { const float tr = 0.299f; const float tg = 0.587f; const float tb = 0.114f; uchar4 r = *((uchar4*)rs); float val = tb * ::abs((int)l.x - r.x); val += tg * ::abs((int)l.y - r.y); val += tr * ::abs((int)l.z - r.z); return val; } uchar4 l; }; template <int cn, typename D> __global__ void comp_data(const PtrStepSzb left, const PtrStepb right, PtrStep<D> data) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (y > 0 && y < left.rows - 1 && x > 0 && x < left.cols - 1) { const uchar* ls = left.ptr(y) + x * cn; const PixDiff<cn> pixDiff(ls); const uchar* rs = right.ptr(y) + x * cn; D* ds = data.ptr(y) + x; const size_t disp_step = data.step * left.rows / sizeof(D); for (int disp = 0; disp < cndisp; disp++) { if (x - disp >= 1) { float val = pixDiff(rs - disp * cn); ds[disp * disp_step] = saturate_cast<D>(fmin(cdata_weight * val, cdata_weight * cmax_data_term)); } else { ds[disp * disp_step] = saturate_cast<D>(cdata_weight * cmax_data_term); } } } } template<typename T, typename D> void comp_data_gpu(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& data, cudaStream_t stream); template <> void comp_data_gpu<uchar, short>(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& data, cudaStream_t stream) { dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(left.cols, threads.x); grid.y = divUp(left.rows, threads.y); comp_data<1, 
short><<<grid, threads, 0, stream>>>(left, right, (PtrStepSz<short>)data); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template <> void comp_data_gpu<uchar, float>(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& data, cudaStream_t stream) { dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(left.cols, threads.x); grid.y = divUp(left.rows, threads.y); comp_data<1, float><<<grid, threads, 0, stream>>>(left, right, (PtrStepSz<float>)data); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template <> void comp_data_gpu<uchar3, short>(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& data, cudaStream_t stream) { dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(left.cols, threads.x); grid.y = divUp(left.rows, threads.y); comp_data<3, short><<<grid, threads, 0, stream>>>(left, right, (PtrStepSz<short>)data); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template <> void comp_data_gpu<uchar3, float>(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& data, cudaStream_t stream) { dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(left.cols, threads.x); grid.y = divUp(left.rows, threads.y); comp_data<3, float><<<grid, threads, 0, stream>>>(left, right, (PtrStepSz<float>)data); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template <> void comp_data_gpu<uchar4, short>(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& data, cudaStream_t stream) { dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(left.cols, threads.x); grid.y = divUp(left.rows, threads.y); comp_data<4, short><<<grid, threads, 0, stream>>>(left, right, (PtrStepSz<short>)data); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template <> void comp_data_gpu<uchar4, float>(const PtrStepSzb& left, const PtrStepSzb& right, const PtrStepSzb& data, cudaStream_t stream) { dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(left.cols, threads.x); grid.y = divUp(left.rows, threads.y); comp_data<4, float><<<grid, threads, 0, stream>>>(left, right, (PtrStepSz<float>)data); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } /////////////////////////////////////////////////////////////// //////////////////////// data step down /////////////////////// /////////////////////////////////////////////////////////////// template <typename T> __global__ void data_step_down(int dst_cols, int dst_rows, int src_rows, const PtrStep<T> src, PtrStep<T> dst) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < dst_cols && y < dst_rows) { for (int d = 0; d < cndisp; ++d) { float dst_reg = src.ptr(d * src_rows + (2*y+0))[(2*x+0)]; dst_reg += src.ptr(d * src_rows + (2*y+1))[(2*x+0)]; dst_reg += src.ptr(d * src_rows + (2*y+0))[(2*x+1)]; dst_reg += src.ptr(d * src_rows + (2*y+1))[(2*x+1)]; dst.ptr(d * dst_rows + y)[x] = saturate_cast<T>(dst_reg); } } } template<typename T> void data_step_down_gpu(int dst_cols, int dst_rows, int src_rows, const PtrStepSzb& src, const PtrStepSzb& dst, cudaStream_t stream) { dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(dst_cols, threads.x); grid.y = divUp(dst_rows, threads.y); data_step_down<T><<<grid, threads, 0, stream>>>(dst_cols, dst_rows, src_rows, 
(PtrStepSz<T>)src, (PtrStepSz<T>)dst); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template void data_step_down_gpu<short>(int dst_cols, int dst_rows, int src_rows, const PtrStepSzb& src, const PtrStepSzb& dst, cudaStream_t stream); template void data_step_down_gpu<float>(int dst_cols, int dst_rows, int src_rows, const PtrStepSzb& src, const PtrStepSzb& dst, cudaStream_t stream); /////////////////////////////////////////////////////////////// /////////////////// level up messages //////////////////////// /////////////////////////////////////////////////////////////// template <typename T> __global__ void level_up_message(int dst_cols, int dst_rows, int src_rows, const PtrStep<T> src, PtrStep<T> dst) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < dst_cols && y < dst_rows) { const size_t dst_disp_step = dst.step * dst_rows / sizeof(T); const size_t src_disp_step = src.step * src_rows / sizeof(T); T* dstr = dst.ptr(y ) + x; const T* srcr = src.ptr(y/2) + x/2; for (int d = 0; d < cndisp; ++d) dstr[d * dst_disp_step] = srcr[d * src_disp_step]; } } template <typename T> void level_up_messages_gpu(int dst_idx, int dst_cols, int dst_rows, int src_rows, PtrStepSzb* mus, PtrStepSzb* mds, PtrStepSzb* mls, PtrStepSzb* mrs, cudaStream_t stream) { dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(dst_cols, threads.x); grid.y = divUp(dst_rows, threads.y); int src_idx = (dst_idx + 1) & 1; level_up_message<T><<<grid, threads, 0, stream>>>(dst_cols, dst_rows, src_rows, (PtrStepSz<T>)mus[src_idx], (PtrStepSz<T>)mus[dst_idx]); cudaSafeCall( cudaGetLastError() ); level_up_message<T><<<grid, threads, 0, stream>>>(dst_cols, dst_rows, src_rows, (PtrStepSz<T>)mds[src_idx], (PtrStepSz<T>)mds[dst_idx]); cudaSafeCall( cudaGetLastError() ); level_up_message<T><<<grid, threads, 0, stream>>>(dst_cols, dst_rows, src_rows, (PtrStepSz<T>)mls[src_idx], (PtrStepSz<T>)mls[dst_idx]); cudaSafeCall( cudaGetLastError() ); level_up_message<T><<<grid, threads, 0, stream>>>(dst_cols, dst_rows, src_rows, (PtrStepSz<T>)mrs[src_idx], (PtrStepSz<T>)mrs[dst_idx]); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template void level_up_messages_gpu<short>(int dst_idx, int dst_cols, int dst_rows, int src_rows, PtrStepSzb* mus, PtrStepSzb* mds, PtrStepSzb* mls, PtrStepSzb* mrs, cudaStream_t stream); template void level_up_messages_gpu<float>(int dst_idx, int dst_cols, int dst_rows, int src_rows, PtrStepSzb* mus, PtrStepSzb* mds, PtrStepSzb* mls, PtrStepSzb* mrs, cudaStream_t stream); /////////////////////////////////////////////////////////////// //////////////////// calc all iterations ///////////////////// /////////////////////////////////////////////////////////////// template <typename T> __device__ void calc_min_linear_penalty(T* dst, size_t step) { float prev = dst[0]; float cur; for (int disp = 1; disp < cndisp; ++disp) { prev += cdisc_single_jump; cur = dst[step * disp]; if (prev < cur) { cur = prev; dst[step * disp] = saturate_cast<T>(prev); } prev = cur; } prev = dst[(cndisp - 1) * step]; for (int disp = cndisp - 2; disp >= 0; disp--) { prev += cdisc_single_jump; cur = dst[step * disp]; if (prev < cur) { cur = prev; dst[step * disp] = saturate_cast<T>(prev); } prev = cur; } } template <typename T> __device__ void message(const T* msg1, const T* msg2, const T* msg3, const T* data, T* dst, size_t msg_disp_step, size_t data_disp_step) { float minimum = 
device::numeric_limits<float>::max(); for(int i = 0; i < cndisp; ++i) { float dst_reg = msg1[msg_disp_step * i]; dst_reg += msg2[msg_disp_step * i]; dst_reg += msg3[msg_disp_step * i]; dst_reg += data[data_disp_step * i]; if (dst_reg < minimum) minimum = dst_reg; dst[msg_disp_step * i] = saturate_cast<T>(dst_reg); } calc_min_linear_penalty(dst, msg_disp_step); minimum += cmax_disc_term; float sum = 0; for(int i = 0; i < cndisp; ++i) { float dst_reg = dst[msg_disp_step * i]; if (dst_reg > minimum) { dst_reg = minimum; dst[msg_disp_step * i] = saturate_cast<T>(minimum); } sum += dst_reg; } sum /= cndisp; for(int i = 0; i < cndisp; ++i) dst[msg_disp_step * i] -= sum; } template <typename T> __global__ void one_iteration(int t, int elem_step, T* u, T* d, T* l, T* r, const PtrStep<T> data, int cols, int rows) { const int y = blockIdx.y * blockDim.y + threadIdx.y; const int x = ((blockIdx.x * blockDim.x + threadIdx.x) << 1) + ((y + t) & 1); if ((y > 0) && (y < rows - 1) && (x > 0) && (x < cols - 1)) { T* us = u + y * elem_step + x; T* ds = d + y * elem_step + x; T* ls = l + y * elem_step + x; T* rs = r + y * elem_step + x; const T* dt = data.ptr(y) + x; size_t msg_disp_step = elem_step * rows; size_t data_disp_step = data.step * rows / sizeof(T); message(us + elem_step, ls + 1, rs - 1, dt, us, msg_disp_step, data_disp_step); message(ds - elem_step, ls + 1, rs - 1, dt, ds, msg_disp_step, data_disp_step); message(us + elem_step, ds - elem_step, rs - 1, dt, rs, msg_disp_step, data_disp_step); message(us + elem_step, ds - elem_step, ls + 1, dt, ls, msg_disp_step, data_disp_step); } } template <typename T> void calc_all_iterations_gpu(int cols, int rows, int iters, const PtrStepSzb& u, const PtrStepSzb& d, const PtrStepSzb& l, const PtrStepSzb& r, const PtrStepSzb& data, cudaStream_t stream) { dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(cols, threads.x << 1); grid.y = divUp(rows, threads.y); int elem_step = (int)(u.step / sizeof(T)); for(int t = 0; t < iters; ++t) { one_iteration<T><<<grid, threads, 0, stream>>>(t, elem_step, (T*)u.data, (T*)d.data, (T*)l.data, (T*)r.data, (PtrStepSz<T>)data, cols, rows); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } } template void calc_all_iterations_gpu<short>(int cols, int rows, int iters, const PtrStepSzb& u, const PtrStepSzb& d, const PtrStepSzb& l, const PtrStepSzb& r, const PtrStepSzb& data, cudaStream_t stream); template void calc_all_iterations_gpu<float>(int cols, int rows, int iters, const PtrStepSzb& u, const PtrStepSzb& d, const PtrStepSzb& l, const PtrStepSzb& r, const PtrStepSzb& data, cudaStream_t stream); /////////////////////////////////////////////////////////////// /////////////////////////// output //////////////////////////// /////////////////////////////////////////////////////////////// template <typename T> __global__ void output(const int elem_step, const T* u, const T* d, const T* l, const T* r, const T* data, PtrStepSz<short> disp) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (y > 0 && y < disp.rows - 1 && x > 0 && x < disp.cols - 1) { const T* us = u + (y + 1) * elem_step + x; const T* ds = d + (y - 1) * elem_step + x; const T* ls = l + y * elem_step + (x + 1); const T* rs = r + y * elem_step+ (x - 1); const T* dt = data + y * elem_step + x; size_t disp_step = disp.rows * elem_step; int best = 0; float best_val = numeric_limits<float>::max(); for (int d = 0; d < cndisp; ++d) { float val = us[d * disp_step]; 
val += ds[d * disp_step]; val += ls[d * disp_step]; val += rs[d * disp_step]; val += dt[d * disp_step]; if (val < best_val) { best_val = val; best = d; } } disp.ptr(y)[x] = saturate_cast<short>(best); } } template <typename T> void output_gpu(const PtrStepSzb& u, const PtrStepSzb& d, const PtrStepSzb& l, const PtrStepSzb& r, const PtrStepSzb& data, const PtrStepSz<short>& disp, cudaStream_t stream) { dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(disp.cols, threads.x); grid.y = divUp(disp.rows, threads.y); int elem_step = static_cast<int>(u.step/sizeof(T)); output<T><<<grid, threads, 0, stream>>>(elem_step, (const T*)u.data, (const T*)d.data, (const T*)l.data, (const T*)r.data, (const T*)data.data, disp); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template void output_gpu<short>(const PtrStepSzb& u, const PtrStepSzb& d, const PtrStepSzb& l, const PtrStepSzb& r, const PtrStepSzb& data, const PtrStepSz<short>& disp, cudaStream_t stream); template void output_gpu<float>(const PtrStepSzb& u, const PtrStepSzb& d, const PtrStepSzb& l, const PtrStepSzb& r, const PtrStepSzb& data, const PtrStepSz<short>& disp, cudaStream_t stream); } // namespace stereobp }}} // namespace cv { namespace gpu { namespace device #endif /* CUDA_DISABLER */
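// divUp is used throughout the launch configurations above but is defined in
// internal_shared.hpp, which is not shown in this file. It is assumed to be the usual
// ceiling-division helper; a minimal sketch:
static inline int divUp(int total, int grain)
{
    return (total + grain - 1) / grain;  // number of 'grain'-sized pieces needed to cover 'total'
}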
dfe6ccf01c9142bbf67937bd46386718ca2f8027.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

#define ARRAY_SIZE 200
#define ARRAY_BYTES ARRAY_SIZE * sizeof(float)

__global__ void CalculateSquare(float* p_out, float* p_in)
{
    int index = threadIdx.x;
    float valueToSquare = p_in[index];
    p_out[index] = valueToSquare * valueToSquare;
}
dfe6ccf01c9142bbf67937bd46386718ca2f8027.cu
#include "includes.h" #define ARRAY_SIZE 200 #define ARRAY_BYTES ARRAY_SIZE * sizeof(float) __global__ void CalculateSquare(float* p_out, float* p_in) { int index = threadIdx.x; float valueToSuqare = p_in[index]; p_out[index] = valueToSuqare * valueToSuqare; }
4a57fdc48f17e9a20e4bdaad2c07e5596ece8879.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.2) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date May 2015 @precisions normal z -> c d s */ #include "common_magma.h" #define BLOCK_SIZE 512 __global__ void zmgecsrmv_kernel( int num_rows, int num_cols, int num_vecs, magmaDoubleComplex alpha, magmaDoubleComplex * dval, magma_index_t * drowptr, magma_index_t * dcolind, magmaDoubleComplex * dx, magmaDoubleComplex beta, magmaDoubleComplex * dy) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; extern __shared__ magmaDoubleComplex dot[]; if( row<num_rows ){ for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_Z_MAKE(0.0, 0.0); int start = drowptr[ row ] ; int end = drowptr[ row+1 ]; for( j=start; j<end; j++ ){ int col = dcolind [ j ]; magmaDoubleComplex val = dval[ j ]; for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x + i*blockDim.x ] += val * dx[ col + i*num_cols ]; } for( int i=0; i<num_vecs; i++ ) dy[ row +i*num_cols ] = alpha * dot[ threadIdx.x + i*blockDim.x ] + beta * dy[ row + i*num_cols ]; } } /** Purpose ------- This routine computes Y = alpha * A * X + beta * Y for X and Y sets of num_vec vectors on the GPU. Input format is CSR. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] num_vecs mama_int_t number of vectors @param[in] alpha magmaDoubleComplex scalar multiplier @param[in] dval magmaDoubleComplex_ptr array containing values of A in CSR @param[in] drowptr magmaIndex_ptr rowpointer of A in CSR @param[in] dcolind magmaIndex_ptr columnindices of A in CSR @param[in] dx magmaDoubleComplex_ptr input vector x @param[in] beta magmaDoubleComplex scalar multiplier @param[out] dy magmaDoubleComplex_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zmgecsrmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t num_vecs, magmaDoubleComplex alpha, magmaDoubleComplex_ptr dval, magmaIndex_ptr drowptr, magmaIndex_ptr dcolind, magmaDoubleComplex_ptr dx, magmaDoubleComplex beta, magmaDoubleComplex_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ), 1, 1); magma_int_t threads = BLOCK_SIZE; unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE * sizeof( magmaDoubleComplex ); // num_vecs vectors hipLaunchKernelGGL(( zmgecsrmv_kernel), dim3(grid), dim3(threads), MEM_SIZE , 0, m, n, num_vecs, alpha, dval, drowptr, dcolind, dx, beta, dy); return MAGMA_SUCCESS; }
4a57fdc48f17e9a20e4bdaad2c07e5596ece8879.cu
/* -- MAGMA (version 1.6.2) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date May 2015 @precisions normal z -> c d s */ #include "common_magma.h" #define BLOCK_SIZE 512 __global__ void zmgecsrmv_kernel( int num_rows, int num_cols, int num_vecs, magmaDoubleComplex alpha, magmaDoubleComplex * dval, magma_index_t * drowptr, magma_index_t * dcolind, magmaDoubleComplex * dx, magmaDoubleComplex beta, magmaDoubleComplex * dy) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; extern __shared__ magmaDoubleComplex dot[]; if( row<num_rows ){ for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_Z_MAKE(0.0, 0.0); int start = drowptr[ row ] ; int end = drowptr[ row+1 ]; for( j=start; j<end; j++ ){ int col = dcolind [ j ]; magmaDoubleComplex val = dval[ j ]; for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x + i*blockDim.x ] += val * dx[ col + i*num_cols ]; } for( int i=0; i<num_vecs; i++ ) dy[ row +i*num_cols ] = alpha * dot[ threadIdx.x + i*blockDim.x ] + beta * dy[ row + i*num_cols ]; } } /** Purpose ------- This routine computes Y = alpha * A * X + beta * Y for X and Y sets of num_vec vectors on the GPU. Input format is CSR. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] num_vecs mama_int_t number of vectors @param[in] alpha magmaDoubleComplex scalar multiplier @param[in] dval magmaDoubleComplex_ptr array containing values of A in CSR @param[in] drowptr magmaIndex_ptr rowpointer of A in CSR @param[in] dcolind magmaIndex_ptr columnindices of A in CSR @param[in] dx magmaDoubleComplex_ptr input vector x @param[in] beta magmaDoubleComplex scalar multiplier @param[out] dy magmaDoubleComplex_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zmgecsrmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t num_vecs, magmaDoubleComplex alpha, magmaDoubleComplex_ptr dval, magmaIndex_ptr drowptr, magmaIndex_ptr dcolind, magmaDoubleComplex_ptr dx, magmaDoubleComplex beta, magmaDoubleComplex_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ), 1, 1); magma_int_t threads = BLOCK_SIZE; unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE * sizeof( magmaDoubleComplex ); // num_vecs vectors zmgecsrmv_kernel<<< grid, threads, MEM_SIZE >>> (m, n, num_vecs, alpha, dval, drowptr, dcolind, dx, beta, dy); return MAGMA_SUCCESS; }
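// An illustrative host reference for what zmgecsrmv_kernel computes, mirroring the
// kernel's memory layout (the i-th vector of x and y is offset by i*num_cols).
// std::complex<double> stands in for magmaDoubleComplex; the function name and
// signature are for clarity only and are not MAGMA API.
#include <complex>
#include <vector>

static void mgecsrmv_reference(
    int num_rows, int num_cols, int num_vecs,
    std::complex<double> alpha,
    const std::complex<double>* val, const int* rowptr, const int* colind,
    const std::complex<double>* x,   // num_vecs input vectors, stride num_cols
    std::complex<double> beta,
    std::complex<double>* y)         // num_vecs output vectors, stride num_cols
{
    for (int row = 0; row < num_rows; ++row) {
        std::vector<std::complex<double>> dot(num_vecs, std::complex<double>(0.0, 0.0));
        for (int j = rowptr[row]; j < rowptr[row + 1]; ++j) {
            for (int i = 0; i < num_vecs; ++i)
                dot[i] += val[j] * x[colind[j] + i * num_cols];
        }
        for (int i = 0; i < num_vecs; ++i)
            y[row + i * num_cols] = alpha * dot[i] + beta * y[row + i * num_cols];
    }
}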
16cf42aecc3552aa34037b2861fdeca28dfc9ea6.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// B=diag(A)
extern "C" {

__global__ void diag_kernel(const int lengthA, const double *a, double *b)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < lengthA) {
        b[i] = a[i + i*lengthA];
    }
}

}
16cf42aecc3552aa34037b2861fdeca28dfc9ea6.cu
// B=diag(A) extern "C" { __global__ void diag_kernel(const int lengthA, const double *a, double *b) { int i = threadIdx.x + blockIdx.x * blockDim.x; if (i<lengthA) { b[i]=a[i+i*lengthA]; } } }
18662111fb08b501ca15e20e3d90414e0025a68c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernels.h"
#include <stdio.h>

#define MIN(a,b) (((a)<(b))?(a):(b))
#define MAX(a,b) (((a)>(b))?(a):(b))

__global__ void resize_kernel( int i_N, float *x, int i_w, int i_h, int i_c,
                               int o_w, int o_h, int o_c, int batch, float *out)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i >= i_N) return;

    int out_index = i;
    int out_w = i%o_w;
    i = i/o_w;
    int out_h = i%o_h;
    i = i/o_h;
    int out_c = i%o_c;
    i = i/o_c;

    // copying last column/last row as padding
    int in_index = ((i*i_c + MIN(out_c,i_c-1))*i_h + MIN(out_h,i_h-1))*i_w + MIN(out_w, i_w-1);
    out[out_index] = x[in_index];
}

void resizeForward( dnnType* srcData, dnnType* dstData,
                    int n, int i_c, int i_h, int i_w,
                    int o_c, int o_h, int o_w,
                    hipStream_t stream )
{
    int i_size = n*i_c*i_h*i_w;
    int o_size = n*o_c*o_h*o_w;
    int blocks  = (o_size+255)/256;
    int threads = 256;

    if(i_c == o_c && i_h == o_h && i_w == o_w ) {
        checkCuda(hipMemcpy(dstData, srcData, i_size*sizeof(dnnType), hipMemcpyDeviceToDevice));
    } else {
        checkCuda(hipMemset(dstData, 0, o_size*sizeof(dnnType)));
        hipLaunchKernelGGL(( resize_kernel), dim3(blocks), dim3(threads), 0, stream, o_size, srcData, i_w, i_h, i_c, o_w, o_h, o_c, n, dstData);
        // printDeviceVector(i_size, srcData);
        // printDeviceVector(o_size, dstData);
    }
}
18662111fb08b501ca15e20e3d90414e0025a68c.cu
#include "kernels.h" #include <stdio.h> #define MIN(a,b) (((a)<(b))?(a):(b)) #define MAX(a,b) (((a)>(b))?(a):(b)) __global__ void resize_kernel( int i_N,float *x, int i_w, int i_h, int i_c, int o_w, int o_h, int o_c, int batch, float *out) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i >= i_N) return; int out_index = i; int out_w = i%o_w; i = i/o_w; int out_h = i%o_h; i = i/o_h; int out_c = i%o_c; i = i/o_c; //copying last column/last row as padding int in_index = ((i*i_c + MIN(out_c,i_c-1))*i_h + MIN(out_h,i_h-1))*i_w + MIN(out_w, i_w-1); out[out_index] = x[in_index]; } void resizeForward( dnnType* srcData, dnnType* dstData, int n, int i_c, int i_h, int i_w, int o_c, int o_h, int o_w, cudaStream_t stream ) { int i_size = n*i_c*i_h*i_w; int o_size = n*o_c*o_h*o_w; int blocks = (o_size+255)/256; int threads = 256; if(i_c == o_c && i_h == o_h && i_w == o_w ) { checkCuda(cudaMemcpy(dstData, srcData, i_size*sizeof(dnnType), cudaMemcpyDeviceToDevice)); } else { checkCuda(cudaMemset(dstData, 0, o_size*sizeof(dnnType))); resize_kernel<<<blocks, threads, 0, stream>>>(o_size, srcData, i_w, i_h, i_c, o_w, o_h, o_c, n, dstData); // printDeviceVector(i_size, srcData); // printDeviceVector(o_size, dstData); } }
daa700b64c1787df85e7d09db6f741702d0cf464.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "octree_nn.h" #include "device_alternate.h" #include <thrust/transform_scan.h> #include <thrust/binary_search.h> #include <thrust/execution_policy.h> #include <thrust/functional.h> #include <thrust/replace.h> #include <thrust/sequence.h> template <typename Dtype> inline __device__ Dtype caffe_gpu_atomic_add(const Dtype val, Dtype* address); template <> inline __device__ float caffe_gpu_atomic_add(const float val, float* address) { return atomicAdd(address, val); } // double atomicAdd implementation taken from: // http://docs.nvidia.com/cuda/cuda-c-programming-guide/#axzz3PVCpVsEG template <> inline __device__ double caffe_gpu_atomic_add(const double val, double* address) { unsigned long long int* address_as_ull = reinterpret_cast<unsigned long long int*>(address); unsigned long long int old = *address_as_ull; unsigned long long int assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } template <typename Dtype> __global__ void memset_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = alpha; } } template <typename Dtype> void memset_gpu(const int N, const Dtype alpha, Dtype* Y) { if (alpha == 0) { CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); return; } hipLaunchKernelGGL(( memset_kernel<Dtype>) , dim3(CudaGetBlocks(N)), dim3(kCudaThreadsNum) , 0, 0, N, alpha, Y); } template <typename Dtype> void memcpy_gpu(const int N, const Dtype* X, Dtype* Y) { if (X != Y) { CUDA_CHECK(hipMemcpy(Y, X, sizeof(Dtype) * N, hipMemcpyDefault)); } } template <typename Dtype> __global__ void pad_forward_kernel(Dtype* Y, const int Hy, const Dtype* X, const int Hx, const int* label, const int n, const Dtype dval) { CUDA_KERNEL_LOOP(i, n) { int h = i % Hy; int c = i / Hy; int idx = label[h]; Y[i] = idx == -1 ? dval : X[c * Hx + idx]; } } template <typename Dtype> __global__ void pad_backward_kernel(Dtype* X, const int Hx, const Dtype* Y, const int Hy, const int* label, const int n) { CUDA_KERNEL_LOOP(i, n) { int h = i % Hy; int c = i / Hy; int idx = label[h]; if (idx != -1) { X[c * Hx + idx] = Y[i]; } } } template<typename Dtype> void pad_forward_gpu(Dtype* Y, const int Hy, const int Cy, const Dtype* X, const int Hx, const int* label, const Dtype dval) { int n = Hy * Cy; // Note: Cx == Cy hipLaunchKernelGGL(( pad_forward_kernel<Dtype>) , dim3(CudaGetBlocks(n)), dim3(kCudaThreadsNum) , 0, 0, Y, Hy, X, Hx, label, n, dval); CUDA_POST_KERNEL_CHECK; } template<typename Dtype> void pad_backward_gpu(Dtype* X, const int Hx, const int Cx, const Dtype* Y, const int Hy, const int* label) { int n = Hy * Cx; // Note: Cx == Cy hipLaunchKernelGGL(( pad_backward_kernel<Dtype>) , dim3(CudaGetBlocks(n)), dim3(kCudaThreadsNum) , 0, 0, X, Hx, Y, Hy, label, n); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void octree2col_kernel(Dtype* data_col, const Dtype* data_octree, const int height, const int kernel_dim, const int stride, const int* neigh, const int* ni, const int height_col, const int n, const int thread_num) { CUDA_KERNEL_LOOP(i, thread_num) { int h = i % height_col; int h1 = h + n * height_col; if (h1 >= height) { data_col[i] = 0; continue; } int t = i / height_col; int k = t % kernel_dim; int c = t / kernel_dim; int octree_h = height << 3 * (stride - 1); int index = stride == 2 ? 
(h1 << 6) + ni[k] : (h1 >> 3 << 6) + ni[(h1 % 8) * kernel_dim + k]; int p = neigh[index]; data_col[i] = p == -1 ? Dtype(0) : data_octree[c * octree_h + p]; } } template <typename Dtype> __global__ void col2octree_kernel(const Dtype* data_col, Dtype* data_octree, const int height, const int kernel_dim, const int stride, const int* neigh, const int* ni, const int height_col, const int n, const int thread_num) { CUDA_KERNEL_LOOP(i, thread_num) { int h = i % height_col; int h1 = h + n * height_col; if (h1 >= height) continue; int t = i / height_col; int k = t % kernel_dim; int c = t / kernel_dim; int octree_h = height << 3 * (stride - 1); int index = stride == 2 ? (h1 << 6) + ni[k] : (h1 >> 3 << 6) + ni[(h1 % 8) * kernel_dim + k]; int p = neigh[index]; if (p != -1) caffe_gpu_atomic_add(data_col[i], data_octree + c * octree_h + p); } } template <typename Dtype> void octree2col_gpu(Dtype* data_col, const Dtype* data_octree, const int channel, const int height, const int kernel_sdim, const int stride, const int* neigh, const int* ni, const int height_col, const int n) { const int kernel = kernel_sdim; const int thread_num = channel * kernel * height_col; hipLaunchKernelGGL(( octree2col_kernel<Dtype>) , dim3(CudaGetBlocks(thread_num)), dim3(kCudaThreadsNum) , 0, 0, data_col, data_octree, height, kernel, stride, neigh, ni, height_col, n, thread_num); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> void col2octree_gpu(const Dtype* data_col, Dtype* data_octree, const int channel, const int height, const int kernel_sdim, const int stride, const int* neigh, const int* ni, const int height_col, const int n) { const int kernel = kernel_sdim; // kernel size: 3*3*3 const int thread_num = channel * kernel * height_col; int octree_h = height << 3 * (stride - 1); // set data_octree to zero ONCE when n ==0 if (n == 0) memset_gpu(channel * octree_h, Dtype(0), data_octree); hipLaunchKernelGGL(( col2octree_kernel<Dtype>) , dim3(CudaGetBlocks(thread_num)), dim3(kCudaThreadsNum) , 0, 0, data_col, data_octree, height, kernel, stride, neigh, ni, height_col, n, thread_num); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void octree_max_pool_kernel(Dtype* top_data, const int top_h, int* mask, const Dtype* btm_data, const int btm_h, const int nthreads) { CUDA_KERNEL_LOOP(i, nthreads) { int h = i % top_h; int c = i / top_h; int hb = 8 * h; int max_idx = hb; btm_data += c * btm_h; Dtype max_val = btm_data[hb]; #pragma unroll 7 for (int idx = hb + 1; idx < hb + 8; ++idx) { Dtype value = btm_data[idx]; if (value > max_val) { max_idx = idx; max_val = value; } } top_data[i] = max_val; mask[i] = max_idx; } } template<typename Dtype> void octree_max_pool_gpu(Dtype* top_data, int top_h, int* mask, const Dtype* btm_data, int btm_h, int channel) { int num = top_h * channel; hipLaunchKernelGGL(( octree_max_pool_kernel<Dtype>) , dim3(CudaGetBlocks(num)), dim3(kCudaThreadsNum) , 0, 0, top_data, top_h, mask, btm_data, btm_h, num); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void octree_max_unpool_kernel(const Dtype* top_data, const int top_h, const int* mask, Dtype* btm_data, const int btm_h, const int nthreads) { CUDA_KERNEL_LOOP(i, nthreads) { int c = i / top_h; btm_data[c * btm_h + mask[i]] = top_data[i]; } } template<typename Dtype> void octree_max_unpool_gpu(const Dtype* top_data, int top_h, const int* mask, Dtype* btm_data, int btm_h, int channel) { int num = top_h * channel; memset_gpu(btm_h * channel, Dtype(0), btm_data); hipLaunchKernelGGL(( octree_max_unpool_kernel<Dtype>) , 
dim3(CudaGetBlocks(num)), dim3(kCudaThreadsNum) , 0, 0, top_data, top_h, mask, btm_data, btm_h, num); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void octree_mask_pool_kernel(Dtype* top_data, const int top_h, const int* mask, const Dtype* btm_data, const int btm_h, const int nthreads) { CUDA_KERNEL_LOOP(i, nthreads) { int c = i / top_h; top_data[i] = btm_data[c * btm_h + mask[i]]; } } template<typename Dtype> void octree_mask_pool_gpu(Dtype* top_data, int top_h, const int* mask, const Dtype* btm_data, int btm_h, int channel) { int num = top_h * channel; hipLaunchKernelGGL(( octree_mask_pool_kernel<Dtype>) , dim3(CudaGetBlocks(num)), dim3(kCudaThreadsNum) , 0, 0, top_data, top_h, mask, btm_data, btm_h, num); CUDA_POST_KERNEL_CHECK; } __global__ void calc_neigh_kernel(int* neigh_split, const int* neigh, const int* children, const int* parent, const int* dis, const int thread_num) { CUDA_KERNEL_LOOP(id, thread_num) { int i = id >> 6; int j = id % 64; int l0 = children[i]; if (l0 != -1) { const int* ngh0 = neigh + (i >> 3 << 6); const int* pi0 = parent + (i % 8) * 64; int* ngh1 = neigh_split + (l0 << 6); int t = -1; int k = ngh0[pi0[j]]; if (k != -1) { int l1 = children[k]; if (l1 != -1) { t = (l1 << 3) + dis[j]; } } ngh1[j] = t; } } } void calc_neigh_gpu(int* neigh_split, const int* neigh, const int* children, const int node_num, const int* parent, const int* dis) { int n = node_num << 6; // node_num: the non_empty node number of parent layer hipLaunchKernelGGL(( calc_neigh_kernel) , dim3(CudaGetBlocks(n)), dim3(kCudaThreadsNum) , 0, 0, neigh_split, neigh, children, parent, dis, n); } __global__ void calc_full_neigh_kernel(int* neigh, const int depth, const int batch_size, const int thread_num) { CUDA_KERNEL_LOOP(id, thread_num) { const unsigned bound = 1 << depth; unsigned node_num = 1 << 3 * depth; unsigned num = node_num >> 3; unsigned tm = id; unsigned z = tm % 4; tm /= 4; unsigned y = tm % 4; tm /= 4; unsigned x = tm % 4; tm /= 4; unsigned i = (tm % num) * 8; unsigned n = tm / num; unsigned x0 = 0, y0 = 0, z0 = 0; #pragma unroll 4 for (unsigned d = 0; d < depth; d++) { x0 |= (i & (1 << 3 * d + 2)) >> (2 * d + 2); y0 |= (i & (1 << 3 * d + 1)) >> (2 * d + 1); z0 |= (i & (1 << 3 * d + 0)) >> (2 * d + 0); } unsigned x1 = x0 + x - 1; unsigned y1 = y0 + y - 1; unsigned z1 = z0 + z - 1; int v = -1; if ((x1 & bound) == 0 && (y1 & bound) == 0 && (z1 & bound) == 0) { unsigned key1 = 0; #pragma unroll 4 for (int d = 0; d < depth; d++) { unsigned mask = 1u << d; key1 |= ((x1 & mask) << (2 * d + 2)) | ((y1 & mask) << (2 * d + 1)) | ((z1 & mask) << (2 * d)); } v = key1 + n * node_num; } neigh[id] = v; } } void calc_neigh_gpu(int* neigh, const int depth, const int batch_size) { int thread_num = batch_size * (1 << 3 * depth + 3); hipLaunchKernelGGL(( calc_full_neigh_kernel) , dim3(CudaGetBlocks(thread_num)), dim3(kCudaThreadsNum) , 0, 0, neigh, depth, batch_size, thread_num); CUDA_POST_KERNEL_CHECK; } __global__ void gen_key_kernel(uint32* key_child, const uint32* key, const int* child, const int thread_num) { typedef unsigned char ubyte; CUDA_KERNEL_LOOP(id, thread_num) { int i = id >> 3; int j = id % 8; int label = child[i]; if (label != -1) { const ubyte* k0 = (const ubyte*)(key + i); ubyte* k1 = (ubyte*)(key_child + 8 * label + j); k1[0] = (k0[0] << 1) | ((j & 4) >> 2); k1[1] = (k0[1] << 1) | ((j & 2) >> 1); k1[2] = (k0[2] << 1) | (j & 1); k1[3] = k0[3]; } } } // use the information from parent layer to calculate the key of current layer void generate_key_gpu(uint32* key_child, 
const uint32* key, const int* child, const int node_num) { int n = node_num << 3; // node_num: the node number of parent layer hipLaunchKernelGGL(( gen_key_kernel) , dim3(CudaGetBlocks(n)), dim3(kCudaThreadsNum) , 0, 0, key_child, key, child, n); CUDA_POST_KERNEL_CHECK; } __global__ void gen_full_key_kernel(uint32* key, const int depth, const int batch_size, const int thread_num) { CUDA_KERNEL_LOOP(i, thread_num) { unsigned node_num = 1 << 3 * depth; unsigned k = i % node_num; unsigned xyz = 0; unsigned char* ptr = (unsigned char*)(&xyz); #pragma unroll 8 for (int d = 0; d < depth; d++) { ptr[0] |= (k & (1 << 3 * d + 2)) >> (2 * d + 2); ptr[1] |= (k & (1 << 3 * d + 1)) >> (2 * d + 1); ptr[2] |= (k & (1 << 3 * d + 0)) >> (2 * d + 0); } ptr[3] = i / node_num; key[i] = xyz; } } void generate_key_gpu(uint32* key, const int depth, const int batch_size) { int thread_num = batch_size * (1 << 3 * depth); hipLaunchKernelGGL(( gen_full_key_kernel) , dim3(CudaGetBlocks(thread_num)), dim3(kCudaThreadsNum) , 0, 0, key, depth, batch_size, thread_num); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> void generate_label_gpu(int* label_data, int& top_h, const Dtype* btm_data, const int btm_h, const int mask) { top_h = 0; thrust::transform_exclusive_scan(thrust::device, btm_data, btm_data + btm_h, label_data, mask == thrust::placeholders::_1, 0, thrust::plus<int>()); hipMemcpy(&top_h, label_data + btm_h - 1, sizeof(int), hipMemcpyDeviceToHost); Dtype flag = -1; hipMemcpy(&flag, btm_data + btm_h - 1, sizeof(Dtype), hipMemcpyDeviceToHost); if (mask == flag) top_h++; thrust::replace_if(thrust::device, label_data, label_data + btm_h, btm_data, mask != thrust::placeholders::_1, -1); } __global__ void bilinear_neigh_kernel(int* bidx, const int* neigh, const int* child, const int node_num, const int* table) { CUDA_KERNEL_LOOP(i, node_num) { int cld = child[i]; if (cld < 0) continue; // skip empty node const int* nghi = neigh + (i >> 3 << 6); #pragma unroll 8 for (int j = 0; j < 8; ++j) { int k = (cld * 8 + j); // child id int* des = bidx + k * 8; const int* tb = table + ((i % 8) * 8 + j) * 8; for (int k = 0; k < 8; ++k) { des[k] = nghi[tb[k]]; } } } } void bilinear_neigh_gpu(int* bidx, const int* neigh, const int* child, const int node_num, const int* table) { hipLaunchKernelGGL(( bilinear_neigh_kernel) , dim3(CudaGetBlocks(node_num)), dim3(kCudaThreadsNum) , 0, 0, bidx, neigh, child, node_num, table); CUDA_POST_KERNEL_CHECK; } __global__ void bilinear_xyz_kernel(uint32* xyz0, float* fracs, const uint32* xyz1, const float scale, const int num) { const int mask[8][3] = { // bilinear mask: {0, 0, 0}, {0, 0, 1}, {0, 1, 0}, {1, 0, 0}, // 27, 9, 9, 9 {0, 1, 1}, {1, 0, 1}, {1, 1, 0}, {1, 1, 1}, // 3, 3, 3, 1 }; CUDA_KERNEL_LOOP(i, num) { float pt[3] = { 0.0f }; float* frac = fracs + 3 * i; int bnd[2][3] = { 0 }; const unsigned char* ptr1 = (const unsigned char*)(xyz1 + i); #pragma unroll 3 for (int c = 0; c < 3; ++c) { pt[c] = (static_cast<float>(ptr1[c]) + 0.5f) / scale - 0.5f; int b = static_cast<int>(pt[c]); frac[c] = pt[c] - static_cast<float>(b); if (frac[c] > 0.5f) { bnd[0][c] = b + 1; bnd[1][c] = b; } else { frac[c] = 1 - frac[c]; bnd[0][c] = b; bnd[1][c] = b + 1; } } #pragma unroll 8 for (int j = 0; j < 8; ++j) { unsigned char* ptr0 = (unsigned char*)(xyz0 + i * 8 + j); for (int c = 0; c < 3; ++c) { ptr0[c] = static_cast<unsigned char>(bnd[mask[j][c]][c]); } ptr0[3] = ptr1[3]; } } } void bilinear_xyz_gpu(uint32* xyz0, float* fracs, const int d0, const uint32* xyz1, const int d1, const int num) { const float 
scale = static_cast<float>(1 << (d1 - d0)); hipLaunchKernelGGL(( bilinear_xyz_kernel) , dim3(CudaGetBlocks(num)), dim3(kCudaThreadsNum) , 0, 0, xyz0, fracs, xyz1, scale, num); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> void sequence_gpu(Dtype* ptr, const int num) { thrust::sequence(thrust::device, ptr, ptr + num); } __global__ void validate_search_kernel(int* idx, const unsigned* key, const int n_key, const unsigned* query, const int n_query) { CUDA_KERNEL_LOOP(i, n_query) { int j = idx[i]; if (j >= n_key || key[j] != query[i]) idx[i] = -1; } } void search_key_gpu(int* idx, const uint32* key, const int n_key, const uint32* query, const int n_query) { thrust::lower_bound(thrust::device, key, key + n_key, query, query + n_query, idx); hipLaunchKernelGGL(( validate_search_kernel) , dim3(CudaGetBlocks(n_query)), dim3(kCudaThreadsNum) , 0, 0, idx, key, n_key, query, n_query); CUDA_POST_KERNEL_CHECK; } // NOTE: !!! currently the depth should be less than 8 __global__ void xyz2key_kernel(uint32* key, const uint32* xyz, const int num, const int depth) { CUDA_KERNEL_LOOP(i, num) { uint32 xyz_in = xyz[i]; uint32 key_out = 0; unsigned char* ptr = (unsigned char*)(&xyz_in); unsigned char* ptr_out = (unsigned char*)(&key_out); #pragma unroll 8 for (int d = 0; d < depth; ++d) { unsigned char mask = 1 << d; key_out |= (ptr[0] & mask) << (2 * d + 2) | (ptr[1] & mask) << (2 * d + 1) | (ptr[2] & mask) << (2 * d + 0); } ptr_out[3] = ptr[3]; key[i] = key_out; } } void xyz2key_gpu(uint32* key, const uint32* xyz, const int num, const int depth) { hipLaunchKernelGGL(( xyz2key_kernel) , dim3(CudaGetBlocks(num)), dim3(kCudaThreadsNum) , 0, 0, key, xyz, num, depth); CUDA_POST_KERNEL_CHECK; } // NOTE: !!! currently the depth should be less than 8 __global__ void key2xyz_kernel(uint32* xyz, const uint32* key, const int num, const int depth) { CUDA_KERNEL_LOOP(i, num) { uint32 key_in = key[i], xyz_out = 0; unsigned char* pt = (unsigned char*)(&xyz_out); unsigned char* ptr = (unsigned char*)(&key_in); pt[3] = ptr[3]; #pragma unroll 8 for (int d = 0; d < depth; d++) { pt[0] |= (key_in & (1u << (3 * d + 2))) >> (2 * d + 2); pt[1] |= (key_in & (1u << (3 * d + 1))) >> (2 * d + 1); pt[2] |= (key_in & (1u << (3 * d))) >> (2 * d); } xyz[i] = xyz_out; } } void key2xyz_gpu(uint32* xyz, const uint32* key, const int num, const int depth) { hipLaunchKernelGGL(( key2xyz_kernel) , dim3(CudaGetBlocks(num)), dim3(kCudaThreadsNum) , 0, 0, xyz, key, num, depth); CUDA_POST_KERNEL_CHECK; } __global__ void key2idx_kernel(int* idx, const uint32* key, const int num) { CUDA_KERNEL_LOOP(i, num) { const unsigned char* ptr = (const unsigned char*)(key + i); idx[i] = static_cast<int>(ptr[3]); } } void key2idx_gpu(int* idx, const uint32* key, const int num) { hipLaunchKernelGGL(( key2idx_kernel) , dim3(CudaGetBlocks(num)), dim3(kCudaThreadsNum) , 0, 0, idx, key, num); CUDA_POST_KERNEL_CHECK; } __global__ void xyz2coord_kernel(float* pt, const uint32* xyz, const int num, const int nthreads) { CUDA_KERNEL_LOOP(i, nthreads) { int h = i % num, c = i / num; const unsigned char* ptr = (const unsigned char*)(xyz + h); pt[i] = static_cast<float>(ptr[c]); // ref: pt[c * num + h] = static_cast<float>(ptr[c]); } } void xyz2coord_gpu(float* pt, const uint32* xyz, const int num, const int channel) { int nthreads = num * channel; hipLaunchKernelGGL(( xyz2coord_kernel) , dim3(CudaGetBlocks(nthreads)), dim3(kCudaThreadsNum) , 0, 0, pt, xyz, num, nthreads); CUDA_POST_KERNEL_CHECK; } __global__ void coord2xyz_kernel(uint32* xyz, const float* pt, const int 
num, const int nthreads) { CUDA_KERNEL_LOOP(i, nthreads) { int h = i % num, c = i / num; unsigned char* ptr = (unsigned char*)(xyz + h); ptr[c] = static_cast<unsigned char>(pt[i]); } } void coord2xyz_gpu(uint32* xyz, const float* pt, const int num, const int channel) { int nthreads = num * channel; hipLaunchKernelGGL(( coord2xyz_kernel) , dim3(CudaGetBlocks(nthreads)), dim3(kCudaThreadsNum) , 0, 0, xyz, pt, num, nthreads); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void align_forward_kernel(Dtype* top_data, const int top_h, const Dtype* btm_data, const int btm_h, const int* index_data, const int btm_num) { CUDA_KERNEL_LOOP(i, btm_num) { int h = i % btm_h; int c = i / btm_h; int j = index_data[h]; if (j != -1) { top_data[c * top_h + j] = btm_data[i]; } } } template <typename Dtype> void align_forward_gpu(Dtype* top_data, const int top_h, const int channel, const Dtype* btm_data, const int btm_h, const int* idx) { int btm_num = btm_h * channel; memset_gpu(top_h * channel, Dtype(0), top_data); hipLaunchKernelGGL(( align_forward_kernel) , dim3(CudaGetBlocks(btm_num)), dim3(kCudaThreadsNum) , 0, 0, top_data, top_h, btm_data, btm_h, idx, btm_num); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void align_backward_kernel(const Dtype* top_data, const int top_h, Dtype* btm_data, const int btm_h, const int* index_data, const int btm_num) { CUDA_KERNEL_LOOP(i, btm_num) { int h = i % btm_h; int c = i / btm_h; int j = index_data[h]; btm_data[i] = j == -1 ? 0 : top_data[c * top_h + j]; } } template <typename Dtype> void align_backward_gpu(const Dtype* top_data, const int top_h, const int channel, Dtype* btm_data, const int btm_h, const int* idx) { int btm_num = btm_h * channel; hipLaunchKernelGGL(( align_backward_kernel) , dim3(CudaGetBlocks(btm_num)), dim3(kCudaThreadsNum) , 0, 0, top_data, top_h, btm_data, btm_h, idx, btm_num); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void octree_gather_kernel(Dtype* top_data, const int top_h, const Dtype* btm_data, const int btm_h, const int* index_data, const int num) { CUDA_KERNEL_LOOP(i, num) { int h = i % top_h; int c = i / top_h; int j = index_data[h]; if (j != -1) { top_data[i] = btm_data[c * btm_h + j]; } } } template <typename Dtype> void octree_gather_gpu(Dtype* top_data, const int top_h, const int channel, const Dtype* btm_data, const int btm_h, const int* idx) { pad_forward_gpu<Dtype>(top_data, top_h, channel, btm_data, btm_h, idx, Dtype(0)); //int num = top_h * channel; //memset_gpu(num, Dtype(0), top_data); //octree_gather_kernel <<< CudaGetBlocks(num), kCudaThreadsNum >>> ( // top_data, top_h, btm_data, btm_h, idx, num); //CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void octree_gatherbk_kernel(const Dtype* top_data, const int top_h, Dtype* btm_data, const int btm_h, const int* index_data, const int num) { CUDA_KERNEL_LOOP(i, num) { int h = i % top_h; int c = i / top_h; int j = index_data[h]; if (j != -1) { caffe_gpu_atomic_add(top_data[i], btm_data + c * btm_h + j); } } } template <typename Dtype> void octree_gatherbk_gpu(const Dtype* top_data, const int top_h, const int channel, Dtype* btm_data, const int btm_h, const int* idx) { int num = top_h * channel; memset_gpu(channel * btm_h, Dtype(0), btm_data); hipLaunchKernelGGL(( octree_gatherbk_kernel) , dim3(CudaGetBlocks(num)), dim3(kCudaThreadsNum) , 0, 0, top_data, top_h, btm_data, btm_h, idx, num); CUDA_POST_KERNEL_CHECK; } __global__ void octree_mask_kernel(float* des, const float* src, const int* label_data, const int 
height, const int mask, const int n) { CUDA_KERNEL_LOOP(i, n) { int h = i % height; des[i] = label_data[h] == mask ? float(0) : src[i]; } } void octree_mask_gpu(float* out_data, const float* in_data, const int* label, int height, int mask, int num) { hipLaunchKernelGGL(( octree_mask_kernel) , dim3(CudaGetBlocks(num)), dim3(kCudaThreadsNum) , 0, 0, out_data, in_data, label, height, mask, num); CUDA_POST_KERNEL_CHECK; } // Explicit instantiation template void memset_gpu<int>(const int N, const int alpha, int* Y); template void memset_gpu<float>(const int N, const float alpha, float* Y); template void memset_gpu<double>(const int N, const double alpha, double* Y); template void memset_gpu<char>(const int N, const char alpha, char* Y); template void memset_gpu<int8_t>(const int N, const int8_t alpha, int8_t* Y); template void memset_gpu<uint8_t>(const int N, const uint8_t alpha, uint8_t* Y); template void memcpy_gpu<int>(const int N, const int* X, int* Y); template void memcpy_gpu<unsigned>(const int N, const unsigned* X, unsigned* Y); template void memcpy_gpu<float>(const int N, const float* X, float* Y); template void memcpy_gpu<double>(const int N, const double* X, double* Y); template void sequence_gpu<int>(int* ptr, const int num); template void sequence_gpu<unsigned int>(unsigned int* ptr, const int num); template void pad_forward_gpu<float>(float* Y, const int Hy, const int Cy, const float* X, const int Hx, const int* label, const float dval); template void pad_forward_gpu<double>(double* Y, const int Hy, const int Cy, const double* X, const int Hx, const int* label, const double dval); template void pad_backward_gpu<float>(float* X, const int Hx, const int Cx, const float* Y, const int Hy, const int* label); template void pad_backward_gpu<double>(double* X, const int Hx, const int Cx, const double* Y, const int Hy, const int* label); template void octree2col_gpu<float>(float* data_col, const float* data_octree, const int channel, const int height, const int kernel_sdim, const int stride, const int* neigh, const int* ni, const int height_col, const int n); template void octree2col_gpu<double>(double* data_col, const double* data_octree, const int channel, const int height, const int kernel_sdim, const int stride, const int* neigh, const int* ni, const int height_col, const int n); template void col2octree_gpu<float>(const float* data_col, float* data_octree, const int channel, const int height, const int kernel_sdim, const int stride, const int* neigh, const int* ni, const int height_col, const int n); template void col2octree_gpu<double>(const double* data_col, double* data_octree, const int channel, const int height, const int kernel_sdim, const int stride, const int* neigh, const int* ni, const int height_col, const int n); template void generate_label_gpu<float>(int* label_data, int& top_h, const float* bottom_data, const int bottom_h, const int mask); template void generate_label_gpu<double>(int* label_data, int& top_h, const double* bottom_data, const int bottom_h, const int mask); template void generate_label_gpu<int>(int* label_data, int& top_h, const int* bottom_data, const int bottom_h, const int mask); template void octree_max_pool_gpu<float>(float* top_data, int top_h, int* mask, const float* btm_data, int bottom_h, int channel); template void octree_max_pool_gpu<double>(double* top_data, int top_h, int* mask, const double* btm_data, int bottom_h, int channel); template void octree_max_unpool_gpu<float>(const float* top_data, int top_h, const int* mask, float* btm_data, int 
bottom_h, int channel); template void octree_max_unpool_gpu<double>(const double* top_data, int top_h, const int* mask, double* btm_data, int bottom_h, int channel); template void octree_mask_pool_gpu<float>(float* top_data, int top_h, const int* mask, const float* btm_data, int bottom_h, int channel); template void octree_mask_pool_gpu<double>(double* top_data, int top_h, const int* mask, const double* btm_data, int bottom_h, int channel); template void align_forward_gpu(float* top_data, const int top_h, const int c, const float* btm_data, const int btm_h, const int* idx); template void align_forward_gpu(double* top_data, const int top_h, const int c, const double* btm_data, const int btm_h, const int* idx); template void align_backward_gpu(const float* top_data, const int top_h, const int c, float* btm_data, const int btm_h, const int* idx); template void align_backward_gpu(const double* top_data, const int top_h, const int c, double* btm_data, const int btm_h, const int* idx); template void octree_gather_gpu(float* top_data, const int top_h, const int c, const float* btm_data, const int btm_h, const int* idx); template void octree_gather_gpu(double* top_data, const int top_h, const int c, const double* btm_data, const int btm_h, const int* idx); template void octree_gatherbk_gpu(const float* top_data, const int top_h, const int c, float* btm_data, const int btm_h, const int* idx); template void octree_gatherbk_gpu(const double* top_data, const int top_h, const int c, double* btm_data, const int btm_h, const int* idx);
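Editorial note: the .hip file above is the hipify-generated counterpart of the .cu file that follows; the systematic differences are the hip/hip_runtime.h header, the hip* runtime calls, and the kernel-launch form. The sketch below only illustrates that launch-syntax mapping; scale_kernel and launch_scale are hypothetical and are not part of either file.

// A minimal sketch, assuming a HIP toolchain: the same launch written in the original
// CUDA triple-chevron form (commented) and in the hipLaunchKernelGGL form that hipify
// emits, analogous to the memset_kernel launch above.
#include <hip/hip_runtime.h>

__global__ void scale_kernel(int n, float alpha, float* y) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) y[i] *= alpha;
}

void launch_scale(int n, float alpha, float* d_y) {
  const int threads = 512;                         // plays the role of kCudaThreadsNum
  const int blocks = (n + threads - 1) / threads;  // plays the role of CudaGetBlocks(n)
  // CUDA form: scale_kernel<<<blocks, threads>>>(n, alpha, d_y);
  hipLaunchKernelGGL(scale_kernel, dim3(blocks), dim3(threads), 0, 0, n, alpha, d_y);
}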
daa700b64c1787df85e7d09db6f741702d0cf464.cu
#include "octree_nn.h" #include "device_alternate.h" #include <thrust/transform_scan.h> #include <thrust/binary_search.h> #include <thrust/execution_policy.h> #include <thrust/functional.h> #include <thrust/replace.h> #include <thrust/sequence.h> template <typename Dtype> inline __device__ Dtype caffe_gpu_atomic_add(const Dtype val, Dtype* address); template <> inline __device__ float caffe_gpu_atomic_add(const float val, float* address) { return atomicAdd(address, val); } // double atomicAdd implementation taken from: // http://docs.nvidia.com/cuda/cuda-c-programming-guide/#axzz3PVCpVsEG template <> inline __device__ double caffe_gpu_atomic_add(const double val, double* address) { unsigned long long int* address_as_ull = reinterpret_cast<unsigned long long int*>(address); unsigned long long int old = *address_as_ull; unsigned long long int assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } template <typename Dtype> __global__ void memset_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = alpha; } } template <typename Dtype> void memset_gpu(const int N, const Dtype alpha, Dtype* Y) { if (alpha == 0) { CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); return; } memset_kernel<Dtype> <<< CudaGetBlocks(N), kCudaThreadsNum >>> ( N, alpha, Y); } template <typename Dtype> void memcpy_gpu(const int N, const Dtype* X, Dtype* Y) { if (X != Y) { CUDA_CHECK(cudaMemcpy(Y, X, sizeof(Dtype) * N, cudaMemcpyDefault)); } } template <typename Dtype> __global__ void pad_forward_kernel(Dtype* Y, const int Hy, const Dtype* X, const int Hx, const int* label, const int n, const Dtype dval) { CUDA_KERNEL_LOOP(i, n) { int h = i % Hy; int c = i / Hy; int idx = label[h]; Y[i] = idx == -1 ? dval : X[c * Hx + idx]; } } template <typename Dtype> __global__ void pad_backward_kernel(Dtype* X, const int Hx, const Dtype* Y, const int Hy, const int* label, const int n) { CUDA_KERNEL_LOOP(i, n) { int h = i % Hy; int c = i / Hy; int idx = label[h]; if (idx != -1) { X[c * Hx + idx] = Y[i]; } } } template<typename Dtype> void pad_forward_gpu(Dtype* Y, const int Hy, const int Cy, const Dtype* X, const int Hx, const int* label, const Dtype dval) { int n = Hy * Cy; // Note: Cx == Cy pad_forward_kernel<Dtype> <<< CudaGetBlocks(n), kCudaThreadsNum >>> ( Y, Hy, X, Hx, label, n, dval); CUDA_POST_KERNEL_CHECK; } template<typename Dtype> void pad_backward_gpu(Dtype* X, const int Hx, const int Cx, const Dtype* Y, const int Hy, const int* label) { int n = Hy * Cx; // Note: Cx == Cy pad_backward_kernel<Dtype> <<< CudaGetBlocks(n), kCudaThreadsNum >>> ( X, Hx, Y, Hy, label, n); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void octree2col_kernel(Dtype* data_col, const Dtype* data_octree, const int height, const int kernel_dim, const int stride, const int* neigh, const int* ni, const int height_col, const int n, const int thread_num) { CUDA_KERNEL_LOOP(i, thread_num) { int h = i % height_col; int h1 = h + n * height_col; if (h1 >= height) { data_col[i] = 0; continue; } int t = i / height_col; int k = t % kernel_dim; int c = t / kernel_dim; int octree_h = height << 3 * (stride - 1); int index = stride == 2 ? (h1 << 6) + ni[k] : (h1 >> 3 << 6) + ni[(h1 % 8) * kernel_dim + k]; int p = neigh[index]; data_col[i] = p == -1 ? 
Dtype(0) : data_octree[c * octree_h + p]; } } template <typename Dtype> __global__ void col2octree_kernel(const Dtype* data_col, Dtype* data_octree, const int height, const int kernel_dim, const int stride, const int* neigh, const int* ni, const int height_col, const int n, const int thread_num) { CUDA_KERNEL_LOOP(i, thread_num) { int h = i % height_col; int h1 = h + n * height_col; if (h1 >= height) continue; int t = i / height_col; int k = t % kernel_dim; int c = t / kernel_dim; int octree_h = height << 3 * (stride - 1); int index = stride == 2 ? (h1 << 6) + ni[k] : (h1 >> 3 << 6) + ni[(h1 % 8) * kernel_dim + k]; int p = neigh[index]; if (p != -1) caffe_gpu_atomic_add(data_col[i], data_octree + c * octree_h + p); } } template <typename Dtype> void octree2col_gpu(Dtype* data_col, const Dtype* data_octree, const int channel, const int height, const int kernel_sdim, const int stride, const int* neigh, const int* ni, const int height_col, const int n) { const int kernel = kernel_sdim; const int thread_num = channel * kernel * height_col; octree2col_kernel<Dtype> <<< CudaGetBlocks(thread_num), kCudaThreadsNum >>> ( data_col, data_octree, height, kernel, stride, neigh, ni, height_col, n, thread_num); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> void col2octree_gpu(const Dtype* data_col, Dtype* data_octree, const int channel, const int height, const int kernel_sdim, const int stride, const int* neigh, const int* ni, const int height_col, const int n) { const int kernel = kernel_sdim; // kernel size: 3*3*3 const int thread_num = channel * kernel * height_col; int octree_h = height << 3 * (stride - 1); // set data_octree to zero ONCE when n ==0 if (n == 0) memset_gpu(channel * octree_h, Dtype(0), data_octree); col2octree_kernel<Dtype> <<< CudaGetBlocks(thread_num), kCudaThreadsNum >>> ( data_col, data_octree, height, kernel, stride, neigh, ni, height_col, n, thread_num); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void octree_max_pool_kernel(Dtype* top_data, const int top_h, int* mask, const Dtype* btm_data, const int btm_h, const int nthreads) { CUDA_KERNEL_LOOP(i, nthreads) { int h = i % top_h; int c = i / top_h; int hb = 8 * h; int max_idx = hb; btm_data += c * btm_h; Dtype max_val = btm_data[hb]; #pragma unroll 7 for (int idx = hb + 1; idx < hb + 8; ++idx) { Dtype value = btm_data[idx]; if (value > max_val) { max_idx = idx; max_val = value; } } top_data[i] = max_val; mask[i] = max_idx; } } template<typename Dtype> void octree_max_pool_gpu(Dtype* top_data, int top_h, int* mask, const Dtype* btm_data, int btm_h, int channel) { int num = top_h * channel; octree_max_pool_kernel<Dtype> <<< CudaGetBlocks(num), kCudaThreadsNum >>> ( top_data, top_h, mask, btm_data, btm_h, num); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void octree_max_unpool_kernel(const Dtype* top_data, const int top_h, const int* mask, Dtype* btm_data, const int btm_h, const int nthreads) { CUDA_KERNEL_LOOP(i, nthreads) { int c = i / top_h; btm_data[c * btm_h + mask[i]] = top_data[i]; } } template<typename Dtype> void octree_max_unpool_gpu(const Dtype* top_data, int top_h, const int* mask, Dtype* btm_data, int btm_h, int channel) { int num = top_h * channel; memset_gpu(btm_h * channel, Dtype(0), btm_data); octree_max_unpool_kernel<Dtype> <<< CudaGetBlocks(num), kCudaThreadsNum >>> ( top_data, top_h, mask, btm_data, btm_h, num); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void octree_mask_pool_kernel(Dtype* top_data, const int top_h, const int* mask, const Dtype* 
btm_data, const int btm_h, const int nthreads) { CUDA_KERNEL_LOOP(i, nthreads) { int c = i / top_h; top_data[i] = btm_data[c * btm_h + mask[i]]; } } template<typename Dtype> void octree_mask_pool_gpu(Dtype* top_data, int top_h, const int* mask, const Dtype* btm_data, int btm_h, int channel) { int num = top_h * channel; octree_mask_pool_kernel<Dtype> <<< CudaGetBlocks(num), kCudaThreadsNum >>> ( top_data, top_h, mask, btm_data, btm_h, num); CUDA_POST_KERNEL_CHECK; } __global__ void calc_neigh_kernel(int* neigh_split, const int* neigh, const int* children, const int* parent, const int* dis, const int thread_num) { CUDA_KERNEL_LOOP(id, thread_num) { int i = id >> 6; int j = id % 64; int l0 = children[i]; if (l0 != -1) { const int* ngh0 = neigh + (i >> 3 << 6); const int* pi0 = parent + (i % 8) * 64; int* ngh1 = neigh_split + (l0 << 6); int t = -1; int k = ngh0[pi0[j]]; if (k != -1) { int l1 = children[k]; if (l1 != -1) { t = (l1 << 3) + dis[j]; } } ngh1[j] = t; } } } void calc_neigh_gpu(int* neigh_split, const int* neigh, const int* children, const int node_num, const int* parent, const int* dis) { int n = node_num << 6; // node_num: the non_empty node number of parent layer calc_neigh_kernel <<< CudaGetBlocks(n), kCudaThreadsNum >>> ( neigh_split, neigh, children, parent, dis, n); } __global__ void calc_full_neigh_kernel(int* neigh, const int depth, const int batch_size, const int thread_num) { CUDA_KERNEL_LOOP(id, thread_num) { const unsigned bound = 1 << depth; unsigned node_num = 1 << 3 * depth; unsigned num = node_num >> 3; unsigned tm = id; unsigned z = tm % 4; tm /= 4; unsigned y = tm % 4; tm /= 4; unsigned x = tm % 4; tm /= 4; unsigned i = (tm % num) * 8; unsigned n = tm / num; unsigned x0 = 0, y0 = 0, z0 = 0; #pragma unroll 4 for (unsigned d = 0; d < depth; d++) { x0 |= (i & (1 << 3 * d + 2)) >> (2 * d + 2); y0 |= (i & (1 << 3 * d + 1)) >> (2 * d + 1); z0 |= (i & (1 << 3 * d + 0)) >> (2 * d + 0); } unsigned x1 = x0 + x - 1; unsigned y1 = y0 + y - 1; unsigned z1 = z0 + z - 1; int v = -1; if ((x1 & bound) == 0 && (y1 & bound) == 0 && (z1 & bound) == 0) { unsigned key1 = 0; #pragma unroll 4 for (int d = 0; d < depth; d++) { unsigned mask = 1u << d; key1 |= ((x1 & mask) << (2 * d + 2)) | ((y1 & mask) << (2 * d + 1)) | ((z1 & mask) << (2 * d)); } v = key1 + n * node_num; } neigh[id] = v; } } void calc_neigh_gpu(int* neigh, const int depth, const int batch_size) { int thread_num = batch_size * (1 << 3 * depth + 3); calc_full_neigh_kernel <<< CudaGetBlocks(thread_num), kCudaThreadsNum >>> ( neigh, depth, batch_size, thread_num); CUDA_POST_KERNEL_CHECK; } __global__ void gen_key_kernel(uint32* key_child, const uint32* key, const int* child, const int thread_num) { typedef unsigned char ubyte; CUDA_KERNEL_LOOP(id, thread_num) { int i = id >> 3; int j = id % 8; int label = child[i]; if (label != -1) { const ubyte* k0 = (const ubyte*)(key + i); ubyte* k1 = (ubyte*)(key_child + 8 * label + j); k1[0] = (k0[0] << 1) | ((j & 4) >> 2); k1[1] = (k0[1] << 1) | ((j & 2) >> 1); k1[2] = (k0[2] << 1) | (j & 1); k1[3] = k0[3]; } } } // use the information from parent layer to calculate the key of current layer void generate_key_gpu(uint32* key_child, const uint32* key, const int* child, const int node_num) { int n = node_num << 3; // node_num: the node number of parent layer gen_key_kernel <<< CudaGetBlocks(n), kCudaThreadsNum >>> ( key_child, key, child, n); CUDA_POST_KERNEL_CHECK; } __global__ void gen_full_key_kernel(uint32* key, const int depth, const int batch_size, const int thread_num) { 
CUDA_KERNEL_LOOP(i, thread_num) { unsigned node_num = 1 << 3 * depth; unsigned k = i % node_num; unsigned xyz = 0; unsigned char* ptr = (unsigned char*)(&xyz); #pragma unroll 8 for (int d = 0; d < depth; d++) { ptr[0] |= (k & (1 << 3 * d + 2)) >> (2 * d + 2); ptr[1] |= (k & (1 << 3 * d + 1)) >> (2 * d + 1); ptr[2] |= (k & (1 << 3 * d + 0)) >> (2 * d + 0); } ptr[3] = i / node_num; key[i] = xyz; } } void generate_key_gpu(uint32* key, const int depth, const int batch_size) { int thread_num = batch_size * (1 << 3 * depth); gen_full_key_kernel <<< CudaGetBlocks(thread_num), kCudaThreadsNum >>> ( key, depth, batch_size, thread_num); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> void generate_label_gpu(int* label_data, int& top_h, const Dtype* btm_data, const int btm_h, const int mask) { top_h = 0; thrust::transform_exclusive_scan(thrust::device, btm_data, btm_data + btm_h, label_data, mask == thrust::placeholders::_1, 0, thrust::plus<int>()); cudaMemcpy(&top_h, label_data + btm_h - 1, sizeof(int), cudaMemcpyDeviceToHost); Dtype flag = -1; cudaMemcpy(&flag, btm_data + btm_h - 1, sizeof(Dtype), cudaMemcpyDeviceToHost); if (mask == flag) top_h++; thrust::replace_if(thrust::device, label_data, label_data + btm_h, btm_data, mask != thrust::placeholders::_1, -1); } __global__ void bilinear_neigh_kernel(int* bidx, const int* neigh, const int* child, const int node_num, const int* table) { CUDA_KERNEL_LOOP(i, node_num) { int cld = child[i]; if (cld < 0) continue; // skip empty node const int* nghi = neigh + (i >> 3 << 6); #pragma unroll 8 for (int j = 0; j < 8; ++j) { int k = (cld * 8 + j); // child id int* des = bidx + k * 8; const int* tb = table + ((i % 8) * 8 + j) * 8; for (int k = 0; k < 8; ++k) { des[k] = nghi[tb[k]]; } } } } void bilinear_neigh_gpu(int* bidx, const int* neigh, const int* child, const int node_num, const int* table) { bilinear_neigh_kernel <<< CudaGetBlocks(node_num), kCudaThreadsNum >>> ( bidx, neigh, child, node_num, table); CUDA_POST_KERNEL_CHECK; } __global__ void bilinear_xyz_kernel(uint32* xyz0, float* fracs, const uint32* xyz1, const float scale, const int num) { const int mask[8][3] = { // bilinear mask: {0, 0, 0}, {0, 0, 1}, {0, 1, 0}, {1, 0, 0}, // 27, 9, 9, 9 {0, 1, 1}, {1, 0, 1}, {1, 1, 0}, {1, 1, 1}, // 3, 3, 3, 1 }; CUDA_KERNEL_LOOP(i, num) { float pt[3] = { 0.0f }; float* frac = fracs + 3 * i; int bnd[2][3] = { 0 }; const unsigned char* ptr1 = (const unsigned char*)(xyz1 + i); #pragma unroll 3 for (int c = 0; c < 3; ++c) { pt[c] = (static_cast<float>(ptr1[c]) + 0.5f) / scale - 0.5f; int b = static_cast<int>(pt[c]); frac[c] = pt[c] - static_cast<float>(b); if (frac[c] > 0.5f) { bnd[0][c] = b + 1; bnd[1][c] = b; } else { frac[c] = 1 - frac[c]; bnd[0][c] = b; bnd[1][c] = b + 1; } } #pragma unroll 8 for (int j = 0; j < 8; ++j) { unsigned char* ptr0 = (unsigned char*)(xyz0 + i * 8 + j); for (int c = 0; c < 3; ++c) { ptr0[c] = static_cast<unsigned char>(bnd[mask[j][c]][c]); } ptr0[3] = ptr1[3]; } } } void bilinear_xyz_gpu(uint32* xyz0, float* fracs, const int d0, const uint32* xyz1, const int d1, const int num) { const float scale = static_cast<float>(1 << (d1 - d0)); bilinear_xyz_kernel <<< CudaGetBlocks(num), kCudaThreadsNum >>> ( xyz0, fracs, xyz1, scale, num); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> void sequence_gpu(Dtype* ptr, const int num) { thrust::sequence(thrust::device, ptr, ptr + num); } __global__ void validate_search_kernel(int* idx, const unsigned* key, const int n_key, const unsigned* query, const int n_query) { CUDA_KERNEL_LOOP(i, 
n_query) { int j = idx[i]; if (j >= n_key || key[j] != query[i]) idx[i] = -1; } } void search_key_gpu(int* idx, const uint32* key, const int n_key, const uint32* query, const int n_query) { thrust::lower_bound(thrust::device, key, key + n_key, query, query + n_query, idx); validate_search_kernel <<< CudaGetBlocks(n_query), kCudaThreadsNum >>> ( idx, key, n_key, query, n_query); CUDA_POST_KERNEL_CHECK; } // NOTE: !!! currently the depth should be less than 8 __global__ void xyz2key_kernel(uint32* key, const uint32* xyz, const int num, const int depth) { CUDA_KERNEL_LOOP(i, num) { uint32 xyz_in = xyz[i]; uint32 key_out = 0; unsigned char* ptr = (unsigned char*)(&xyz_in); unsigned char* ptr_out = (unsigned char*)(&key_out); #pragma unroll 8 for (int d = 0; d < depth; ++d) { unsigned char mask = 1 << d; key_out |= (ptr[0] & mask) << (2 * d + 2) | (ptr[1] & mask) << (2 * d + 1) | (ptr[2] & mask) << (2 * d + 0); } ptr_out[3] = ptr[3]; key[i] = key_out; } } void xyz2key_gpu(uint32* key, const uint32* xyz, const int num, const int depth) { xyz2key_kernel <<< CudaGetBlocks(num), kCudaThreadsNum >>> ( key, xyz, num, depth); CUDA_POST_KERNEL_CHECK; } // NOTE: !!! currently the depth should be less than 8 __global__ void key2xyz_kernel(uint32* xyz, const uint32* key, const int num, const int depth) { CUDA_KERNEL_LOOP(i, num) { uint32 key_in = key[i], xyz_out = 0; unsigned char* pt = (unsigned char*)(&xyz_out); unsigned char* ptr = (unsigned char*)(&key_in); pt[3] = ptr[3]; #pragma unroll 8 for (int d = 0; d < depth; d++) { pt[0] |= (key_in & (1u << (3 * d + 2))) >> (2 * d + 2); pt[1] |= (key_in & (1u << (3 * d + 1))) >> (2 * d + 1); pt[2] |= (key_in & (1u << (3 * d))) >> (2 * d); } xyz[i] = xyz_out; } } void key2xyz_gpu(uint32* xyz, const uint32* key, const int num, const int depth) { key2xyz_kernel <<< CudaGetBlocks(num), kCudaThreadsNum >>> ( xyz, key, num, depth); CUDA_POST_KERNEL_CHECK; } __global__ void key2idx_kernel(int* idx, const uint32* key, const int num) { CUDA_KERNEL_LOOP(i, num) { const unsigned char* ptr = (const unsigned char*)(key + i); idx[i] = static_cast<int>(ptr[3]); } } void key2idx_gpu(int* idx, const uint32* key, const int num) { key2idx_kernel <<< CudaGetBlocks(num), kCudaThreadsNum >>> (idx, key, num); CUDA_POST_KERNEL_CHECK; } __global__ void xyz2coord_kernel(float* pt, const uint32* xyz, const int num, const int nthreads) { CUDA_KERNEL_LOOP(i, nthreads) { int h = i % num, c = i / num; const unsigned char* ptr = (const unsigned char*)(xyz + h); pt[i] = static_cast<float>(ptr[c]); // ref: pt[c * num + h] = static_cast<float>(ptr[c]); } } void xyz2coord_gpu(float* pt, const uint32* xyz, const int num, const int channel) { int nthreads = num * channel; xyz2coord_kernel <<< CudaGetBlocks(nthreads), kCudaThreadsNum >>> ( pt, xyz, num, nthreads); CUDA_POST_KERNEL_CHECK; } __global__ void coord2xyz_kernel(uint32* xyz, const float* pt, const int num, const int nthreads) { CUDA_KERNEL_LOOP(i, nthreads) { int h = i % num, c = i / num; unsigned char* ptr = (unsigned char*)(xyz + h); ptr[c] = static_cast<unsigned char>(pt[i]); } } void coord2xyz_gpu(uint32* xyz, const float* pt, const int num, const int channel) { int nthreads = num * channel; coord2xyz_kernel <<< CudaGetBlocks(nthreads), kCudaThreadsNum >>> ( xyz, pt, num, nthreads); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void align_forward_kernel(Dtype* top_data, const int top_h, const Dtype* btm_data, const int btm_h, const int* index_data, const int btm_num) { CUDA_KERNEL_LOOP(i, btm_num) { int h = i % btm_h; 
int c = i / btm_h; int j = index_data[h]; if (j != -1) { top_data[c * top_h + j] = btm_data[i]; } } } template <typename Dtype> void align_forward_gpu(Dtype* top_data, const int top_h, const int channel, const Dtype* btm_data, const int btm_h, const int* idx) { int btm_num = btm_h * channel; memset_gpu(top_h * channel, Dtype(0), top_data); align_forward_kernel <<< CudaGetBlocks(btm_num), kCudaThreadsNum >>> ( top_data, top_h, btm_data, btm_h, idx, btm_num); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void align_backward_kernel(const Dtype* top_data, const int top_h, Dtype* btm_data, const int btm_h, const int* index_data, const int btm_num) { CUDA_KERNEL_LOOP(i, btm_num) { int h = i % btm_h; int c = i / btm_h; int j = index_data[h]; btm_data[i] = j == -1 ? 0 : top_data[c * top_h + j]; } } template <typename Dtype> void align_backward_gpu(const Dtype* top_data, const int top_h, const int channel, Dtype* btm_data, const int btm_h, const int* idx) { int btm_num = btm_h * channel; align_backward_kernel <<< CudaGetBlocks(btm_num), kCudaThreadsNum >>> ( top_data, top_h, btm_data, btm_h, idx, btm_num); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void octree_gather_kernel(Dtype* top_data, const int top_h, const Dtype* btm_data, const int btm_h, const int* index_data, const int num) { CUDA_KERNEL_LOOP(i, num) { int h = i % top_h; int c = i / top_h; int j = index_data[h]; if (j != -1) { top_data[i] = btm_data[c * btm_h + j]; } } } template <typename Dtype> void octree_gather_gpu(Dtype* top_data, const int top_h, const int channel, const Dtype* btm_data, const int btm_h, const int* idx) { pad_forward_gpu<Dtype>(top_data, top_h, channel, btm_data, btm_h, idx, Dtype(0)); //int num = top_h * channel; //memset_gpu(num, Dtype(0), top_data); //octree_gather_kernel <<< CudaGetBlocks(num), kCudaThreadsNum >>> ( // top_data, top_h, btm_data, btm_h, idx, num); //CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void octree_gatherbk_kernel(const Dtype* top_data, const int top_h, Dtype* btm_data, const int btm_h, const int* index_data, const int num) { CUDA_KERNEL_LOOP(i, num) { int h = i % top_h; int c = i / top_h; int j = index_data[h]; if (j != -1) { caffe_gpu_atomic_add(top_data[i], btm_data + c * btm_h + j); } } } template <typename Dtype> void octree_gatherbk_gpu(const Dtype* top_data, const int top_h, const int channel, Dtype* btm_data, const int btm_h, const int* idx) { int num = top_h * channel; memset_gpu(channel * btm_h, Dtype(0), btm_data); octree_gatherbk_kernel <<< CudaGetBlocks(num), kCudaThreadsNum >>> ( top_data, top_h, btm_data, btm_h, idx, num); CUDA_POST_KERNEL_CHECK; } __global__ void octree_mask_kernel(float* des, const float* src, const int* label_data, const int height, const int mask, const int n) { CUDA_KERNEL_LOOP(i, n) { int h = i % height; des[i] = label_data[h] == mask ? 
float(0) : src[i]; } } void octree_mask_gpu(float* out_data, const float* in_data, const int* label, int height, int mask, int num) { octree_mask_kernel <<< CudaGetBlocks(num), kCudaThreadsNum >>> ( out_data, in_data, label, height, mask, num); CUDA_POST_KERNEL_CHECK; } // Explicit instantiation template void memset_gpu<int>(const int N, const int alpha, int* Y); template void memset_gpu<float>(const int N, const float alpha, float* Y); template void memset_gpu<double>(const int N, const double alpha, double* Y); template void memset_gpu<char>(const int N, const char alpha, char* Y); template void memset_gpu<int8_t>(const int N, const int8_t alpha, int8_t* Y); template void memset_gpu<uint8_t>(const int N, const uint8_t alpha, uint8_t* Y); template void memcpy_gpu<int>(const int N, const int* X, int* Y); template void memcpy_gpu<unsigned>(const int N, const unsigned* X, unsigned* Y); template void memcpy_gpu<float>(const int N, const float* X, float* Y); template void memcpy_gpu<double>(const int N, const double* X, double* Y); template void sequence_gpu<int>(int* ptr, const int num); template void sequence_gpu<unsigned int>(unsigned int* ptr, const int num); template void pad_forward_gpu<float>(float* Y, const int Hy, const int Cy, const float* X, const int Hx, const int* label, const float dval); template void pad_forward_gpu<double>(double* Y, const int Hy, const int Cy, const double* X, const int Hx, const int* label, const double dval); template void pad_backward_gpu<float>(float* X, const int Hx, const int Cx, const float* Y, const int Hy, const int* label); template void pad_backward_gpu<double>(double* X, const int Hx, const int Cx, const double* Y, const int Hy, const int* label); template void octree2col_gpu<float>(float* data_col, const float* data_octree, const int channel, const int height, const int kernel_sdim, const int stride, const int* neigh, const int* ni, const int height_col, const int n); template void octree2col_gpu<double>(double* data_col, const double* data_octree, const int channel, const int height, const int kernel_sdim, const int stride, const int* neigh, const int* ni, const int height_col, const int n); template void col2octree_gpu<float>(const float* data_col, float* data_octree, const int channel, const int height, const int kernel_sdim, const int stride, const int* neigh, const int* ni, const int height_col, const int n); template void col2octree_gpu<double>(const double* data_col, double* data_octree, const int channel, const int height, const int kernel_sdim, const int stride, const int* neigh, const int* ni, const int height_col, const int n); template void generate_label_gpu<float>(int* label_data, int& top_h, const float* bottom_data, const int bottom_h, const int mask); template void generate_label_gpu<double>(int* label_data, int& top_h, const double* bottom_data, const int bottom_h, const int mask); template void generate_label_gpu<int>(int* label_data, int& top_h, const int* bottom_data, const int bottom_h, const int mask); template void octree_max_pool_gpu<float>(float* top_data, int top_h, int* mask, const float* btm_data, int bottom_h, int channel); template void octree_max_pool_gpu<double>(double* top_data, int top_h, int* mask, const double* btm_data, int bottom_h, int channel); template void octree_max_unpool_gpu<float>(const float* top_data, int top_h, const int* mask, float* btm_data, int bottom_h, int channel); template void octree_max_unpool_gpu<double>(const double* top_data, int top_h, const int* mask, double* btm_data, int bottom_h, 
int channel); template void octree_mask_pool_gpu<float>(float* top_data, int top_h, const int* mask, const float* btm_data, int bottom_h, int channel); template void octree_mask_pool_gpu<double>(double* top_data, int top_h, const int* mask, const double* btm_data, int bottom_h, int channel); template void align_forward_gpu(float* top_data, const int top_h, const int c, const float* btm_data, const int btm_h, const int* idx); template void align_forward_gpu(double* top_data, const int top_h, const int c, const double* btm_data, const int btm_h, const int* idx); template void align_backward_gpu(const float* top_data, const int top_h, const int c, float* btm_data, const int btm_h, const int* idx); template void align_backward_gpu(const double* top_data, const int top_h, const int c, double* btm_data, const int btm_h, const int* idx); template void octree_gather_gpu(float* top_data, const int top_h, const int c, const float* btm_data, const int btm_h, const int* idx); template void octree_gather_gpu(double* top_data, const int top_h, const int c, const double* btm_data, const int btm_h, const int* idx); template void octree_gatherbk_gpu(const float* top_data, const int top_h, const int c, float* btm_data, const int btm_h, const int* idx); template void octree_gatherbk_gpu(const double* top_data, const int top_h, const int c, double* btm_data, const int btm_h, const int* idx);
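Editorial note: xyz2key_kernel and key2xyz_kernel above implement a Morton-style encoding for depth < 8: bit d of x, y and z is placed at key bits 3d+2, 3d+1 and 3d respectively, and the top byte of the key carries the batch index (byte 3 on the little-endian targets these kernels assume). The host-side round trip below only illustrates that bit interleaving; xyz_to_key and key_to_xyz are hypothetical helpers, not part of the file above.

// A minimal host-side sketch of the interleaving used by xyz2key_kernel / key2xyz_kernel.
#include <cstdint>
#include <cstdio>

uint32_t xyz_to_key(uint8_t x, uint8_t y, uint8_t z, uint8_t batch, int depth) {
  uint32_t key = 0;
  for (int d = 0; d < depth; ++d) {
    uint32_t mask = 1u << d;
    // bit d of x -> key bit 3d+2, bit d of y -> key bit 3d+1, bit d of z -> key bit 3d
    key |= ((x & mask) << (2 * d + 2)) | ((y & mask) << (2 * d + 1)) | ((z & mask) << (2 * d));
  }
  return key | (uint32_t(batch) << 24);  // batch index in the top byte
}

void key_to_xyz(uint32_t key, int depth, uint8_t& x, uint8_t& y, uint8_t& z, uint8_t& batch) {
  x = y = z = 0;
  for (int d = 0; d < depth; ++d) {
    x |= (key & (1u << (3 * d + 2))) >> (2 * d + 2);
    y |= (key & (1u << (3 * d + 1))) >> (2 * d + 1);
    z |= (key & (1u << (3 * d)))     >> (2 * d);
  }
  batch = uint8_t(key >> 24);
}

int main() {
  uint8_t x, y, z, b;
  key_to_xyz(xyz_to_key(5, 3, 7, 1, 5), 5, x, y, z, b);
  printf("%d %d %d %d\n", x, y, z, b);  // expected: 5 3 7 1
  return 0;
}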
04fd5dda02c1446e06848115b4bed8c99d99fde4.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is a automatically generated test. Do not modify */

#include <stdio.h>
#include <stdlib.h>
#include <math.h>

__global__ void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float* var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27) {
  if (comp <= (-0.0f * sinhf((+1.5104E-35f + var_3)))) {
    float tmp_1 = (var_4 - var_5 * var_6 - var_7);
    comp = tmp_1 - (+1.0667E-36f * var_8);
    if (comp > var_9 - var_10 + (-1.1912E-37f - var_11)) {
      comp = cosf((-1.5333E-37f * -0.0f));
      comp += -1.2488E-41f * (-1.2186E-43f - (var_12 * (-1.2041E-42f - var_13 / -1.1994E-44f)));
      comp += +1.4314E-43f / +1.2993E-28f + +1.5299E-15f - (var_14 - log10f((var_15 - +1.8415E-37f * -1.3874E-44f - +1.5091E35f)));
      comp += -0.0f + -1.8204E-37f;
    }
    for (int i=0; i < var_1; ++i) {
      comp = var_16 - var_17 + (-1.4232E34f + (-1.3987E-44f * -0.0f * +0.0f));
    }
    for (int i=0; i < var_2; ++i) {
      float tmp_2 = +1.1386E-35f + var_19 - +0.0f + var_20 / -1.2414E-37f * +1.5185E-43f;
      comp = tmp_2 / (var_21 * (-1.4031E13f + var_22 / (var_23 + var_24)));
      var_18[i] = (var_25 / (-1.9526E-24f / +1.8340E-4f));
      comp += var_18[i] / (var_26 - (var_27 - -1.7063E-37f * +1.9946E-44f));
    }
  }
  printf("%.17g\n", comp);
}

float* initPointer(float v) {
  float *ret = (float*) malloc(sizeof(float)*10);
  for(int i=0; i < 10; ++i)
    ret[i] = v;
  return ret;
}

int main(int argc, char** argv) {
  /* Program variables */
  float tmp_1 = atof(argv[1]);
  int tmp_2 = atoi(argv[2]);
  int tmp_3 = atoi(argv[3]);
  float tmp_4 = atof(argv[4]);
  float tmp_5 = atof(argv[5]);
  float tmp_6 = atof(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);
  float tmp_9 = atof(argv[9]);
  float tmp_10 = atof(argv[10]);
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);
  float tmp_15 = atof(argv[15]);
  float tmp_16 = atof(argv[16]);
  float tmp_17 = atof(argv[17]);
  float tmp_18 = atof(argv[18]);
  float* tmp_19 = initPointer( atof(argv[19]) );
  float tmp_20 = atof(argv[20]);
  float tmp_21 = atof(argv[21]);
  float tmp_22 = atof(argv[22]);
  float tmp_23 = atof(argv[23]);
  float tmp_24 = atof(argv[24]);
  float tmp_25 = atof(argv[25]);
  float tmp_26 = atof(argv[26]);
  float tmp_27 = atof(argv[27]);
  float tmp_28 = atof(argv[28]);

  hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28);
  hipDeviceSynchronize();

  return 0;
}
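Editorial note: the generated test above launches its kernel and synchronizes without checking any return codes. A minimal sketch of how such a launch could be checked with the HIP runtime follows; the HIP_CHECK macro is an assumption of this note, not part of the generated file.

// Hedged sketch: report launch-time and execution-time errors around a HIP kernel launch.
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>

#define HIP_CHECK(expr)                                              \
  do {                                                               \
    hipError_t err_ = (expr);                                        \
    if (err_ != hipSuccess) {                                        \
      fprintf(stderr, "HIP error %s at %s:%d\n",                     \
              hipGetErrorString(err_), __FILE__, __LINE__);          \
      exit(EXIT_FAILURE);                                            \
    }                                                                \
  } while (0)

// Usage after a launch like the one in the test above:
//   hipLaunchKernelGGL(compute, dim3(1), dim3(1), 0, 0, /* ...arguments... */);
//   HIP_CHECK(hipGetLastError());       // errors from the launch itself
//   HIP_CHECK(hipDeviceSynchronize());  // errors raised during kernel execution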
04fd5dda02c1446e06848115b4bed8c99d99fde4.cu
/* This is a automatically generated test. Do not modify */

#include <stdio.h>
#include <stdlib.h>
#include <math.h>

__global__ void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float* var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27) {
  if (comp <= (-0.0f * sinhf((+1.5104E-35f + var_3)))) {
    float tmp_1 = (var_4 - var_5 * var_6 - var_7);
    comp = tmp_1 - (+1.0667E-36f * var_8);
    if (comp > var_9 - var_10 + (-1.1912E-37f - var_11)) {
      comp = cosf((-1.5333E-37f * -0.0f));
      comp += -1.2488E-41f * (-1.2186E-43f - (var_12 * (-1.2041E-42f - var_13 / -1.1994E-44f)));
      comp += +1.4314E-43f / +1.2993E-28f + +1.5299E-15f - (var_14 - log10f((var_15 - +1.8415E-37f * -1.3874E-44f - +1.5091E35f)));
      comp += -0.0f + -1.8204E-37f;
    }
    for (int i=0; i < var_1; ++i) {
      comp = var_16 - var_17 + (-1.4232E34f + (-1.3987E-44f * -0.0f * +0.0f));
    }
    for (int i=0; i < var_2; ++i) {
      float tmp_2 = +1.1386E-35f + var_19 - +0.0f + var_20 / -1.2414E-37f * +1.5185E-43f;
      comp = tmp_2 / (var_21 * (-1.4031E13f + var_22 / (var_23 + var_24)));
      var_18[i] = (var_25 / (-1.9526E-24f / +1.8340E-4f));
      comp += var_18[i] / (var_26 - (var_27 - -1.7063E-37f * +1.9946E-44f));
    }
  }
  printf("%.17g\n", comp);
}

float* initPointer(float v) {
  float *ret = (float*) malloc(sizeof(float)*10);
  for(int i=0; i < 10; ++i)
    ret[i] = v;
  return ret;
}

int main(int argc, char** argv) {
  /* Program variables */
  float tmp_1 = atof(argv[1]);
  int tmp_2 = atoi(argv[2]);
  int tmp_3 = atoi(argv[3]);
  float tmp_4 = atof(argv[4]);
  float tmp_5 = atof(argv[5]);
  float tmp_6 = atof(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);
  float tmp_9 = atof(argv[9]);
  float tmp_10 = atof(argv[10]);
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);
  float tmp_15 = atof(argv[15]);
  float tmp_16 = atof(argv[16]);
  float tmp_17 = atof(argv[17]);
  float tmp_18 = atof(argv[18]);
  float* tmp_19 = initPointer( atof(argv[19]) );
  float tmp_20 = atof(argv[20]);
  float tmp_21 = atof(argv[21]);
  float tmp_22 = atof(argv[22]);
  float tmp_23 = atof(argv[23]);
  float tmp_24 = atof(argv[24]);
  float tmp_25 = atof(argv[25]);
  float tmp_26 = atof(argv[26]);
  float tmp_27 = atof(argv[27]);
  float tmp_28 = atof(argv[28]);

  compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28);
  cudaDeviceSynchronize();

  return 0;
}
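Editorial note: in both versions of this generated test, initPointer returns a plain host malloc allocation that the kernel then writes through var_18, which would typically fault on a discrete GPU. A hedged alternative, assuming the intent is for the device to write that buffer, is managed memory; initPointerManaged below is hypothetical and not part of the generated file.

// Sketch of a managed-memory variant of initPointer (CUDA side; hipMallocManaged is the HIP analogue).
#include <cuda_runtime.h>

float* initPointerManaged(float v) {
  float* ret = nullptr;
  cudaMallocManaged(&ret, sizeof(float) * 10);  // visible to both host and device
  for (int i = 0; i < 10; ++i) ret[i] = v;
  return ret;
}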
fa86b4d17f66877cde9ac827dc8a3849e4b6e27e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cudaRTCommon.h" #ifdef FULLSPECTRAL #include "cudaspechelper.h" #include <thrust/remove.h> #include <thrust/execution_policy.h> #define BLOCK_SIZE 16 #define NORMALRAY_BOUND_MAX 10 #define PATHSTREAM_SIZE 1E4*64 namespace cudaRTPTStreamSpec { RT_ATTRIBS_N(0) RT_ATTRIBS_BGN RT_ATTRIBS_END enum RAYTYPE { RAYTYPE_EYE = 0, RAYTYPE_DIFF = 1, RAYTYPE_SPEC = 2 }; struct PTPathVertex { uint isTerminated; uint2 pathPixel; float3 pathOutDir; float3 pathVertexPos; float3 pathOutMulTerm; RAYTYPE pathType; float pathSample; uint pathWaveInd; uint pathSampleDepth; hiprandState_t randState; __device__ PTPathVertex(uint _isTerminated, uint2 _pathPixel, float3 _pathOutDir, float3 _pathVertexPos , RAYTYPE _pathType, hiprandState_t _randState, uint waveInd) : isTerminated(_isTerminated) , pathPixel(_pathPixel) , pathOutDir(_pathOutDir) , pathVertexPos(_pathVertexPos) , pathOutMulTerm(make_float3(1.f,1.f,1.f)) , pathType(_pathType) , pathSample(0.f) , pathWaveInd(waveInd) , pathSampleDepth(0) , randState(_randState) {} }; PTPathVertex* g_devPathQueue = nullptr; uint g_uPathQueueCur = 0; uint g_uPathQueueSize = 0; PTPathVertex** g_devPathStream = nullptr; uint g_uPathStreamSize = PATHSTREAM_SIZE; void freeStreamMem() { NPCudaSpecHelper::ClearBaseSpectrum(); g_uPathQueueCur = g_uPathQueueSize = 0; CUFREE(g_devPathQueue); CUFREE(g_devPathStream); } void allocateStreamMem(uint queueSize = 480000) { g_uPathQueueSize = queueSize; HANDLE_ERROR(hipMalloc((void**)&g_devPathQueue, sizeof(PTPathVertex) * g_uPathQueueSize)); HANDLE_ERROR(hipMemset((void*)g_devPathQueue, 0, sizeof(PTPathVertex) * g_uPathQueueSize)); HANDLE_ERROR(hipMalloc((void**)&g_devPathStream, sizeof(PTPathVertex*) * g_uPathStreamSize)); HANDLE_ERROR(hipMemset((void*)g_devPathStream, 0, sizeof(PTPathVertex*) * g_uPathStreamSize)); NPCudaSpecHelper::InitBaseSpectrum(); } float* g_devResultData = nullptr; float* g_devRGBResultData = nullptr; //float* g_devAccResultData = nullptr; uint* g_devSampleResultN = nullptr; NPMathHelper::Mat4x4 g_matLastCamMat; NPMathHelper::Mat4x4 g_matCurCamMat; uint32 g_uCurFrameN = 0; size_t g_resultDataSize = 0; uint32 WangHash(uint32 a) { a = (a ^ 61) ^ (a >> 16); a = a + (a << 3); a = a ^ (a >> 4); a = a * 0x27d4eb2d; a = a ^ (a >> 15); return a; } __global__ void pt_traceSample_kernel(RTVertex* vertices, RTTriangle* triangles, RTMaterial* materials , CURTTexture* textures, PTPathVertex** pathStream, uint activePathStreamSize , uint32 lambdaStart, uint32 lambdaEnd, uint32 specSampleN , NPCudaSpecHelper::Spectrum* baseSpec, float baseSpecIntY) { uint x = blockIdx.x * blockDim.x + threadIdx.x; if (x >= activePathStreamSize || pathStream[x]->isTerminated) return; PTPathVertex* procVertex = pathStream[x]; CURay ray = CURay(procVertex->pathVertexPos, procVertex->pathOutDir); TracePrimitiveResult traceResult; if (TracePrimitive(ray, traceResult, M_INF, M_FLT_BIAS_EPSILON, false)) { RTTriangle* tri = &triangles[traceResult.triId]; RTMaterial* mat = &materials[tri->matInd]; RTVertex* v0 = &vertices[tri->vertInd0]; RTVertex* v1 = &vertices[tri->vertInd1]; RTVertex* v2 = &vertices[tri->vertInd2]; float2 uv0 = make_float2(v0->tex._x, v0->tex._y); float2 uv1 = make_float2(v1->tex._x, v1->tex._y); float2 uv2 = make_float2(v2->tex._x, v2->tex._y); float2 uv = uv0 * traceResult.w + uv1 * traceResult.u + uv2 * traceResult.v; float3 n0 = V32F3(v0->norm); float3 n1 = V32F3(v1->norm); float3 n2 = V32F3(v2->norm); float3 norm = n0 * 
traceResult.w + n1 * traceResult.u + n2 * traceResult.v; float3 diff; float3 emissive; float trans; float specular; float metallic; float roughness; float anisotropic; float sheen; float sheenTint; float clearcoat; float clearcoatGloss; GetMaterialColors(mat, uv, textures, diff, norm, emissive, trans, specular, metallic, roughness , anisotropic, sheen, sheenTint, clearcoat, clearcoatGloss); float3 nl = vecDot(norm, ray.dir) < 0.f ? norm : -1 * norm; { uint waveInd = procVertex->pathWaveInd; float waveSpecData = mat->specPara[waveInd*specSampleN + waveInd]; if (length(emissive) > 0.f) { emissive = emissive * waveSpecData; } else { diff = make_float3(waveSpecData, waveSpecData, waveSpecData); } // Get some random microfacet float3 hDir = ImportanceSampleGGX(make_float2(hiprand_uniform(&procVertex->randState), hiprand_uniform(&procVertex->randState)), roughness, nl); // Calculate flesnel float voH = vecDot(-1 * ray.dir, hDir); float3 f0 = vecLerp(0.08 * make_float3(specular, specular, specular), diff, metallic); float3 brdf_f = Fresnel(f0, voH); // Reflected or Refracted float reflProb = lerp(length(brdf_f), 1.0f, metallic); float refrProb = trans; float3 reflDir; float3 refrDir; CURay nextRay = ray; float3 lightMulTerm; RAYTYPE nextRayType = procVertex->pathType; if (refrProb > 0) { bool into = vecDot(nl, norm) > 0.f; //float wavelength = lambdaStart + (float)waveInd * ((float)lambdaEnd - (float)lambdaStart) / (float)specSampleN; //float sqMicWavelength = (0.001f * wavelength) * (0.001f * wavelength); float nt = specular * 0.8f + 1.f; if (mat->isUseSpecIOR) { nt = mat->specIOR[waveInd]; } float nc = 1.0f; float nnt = into ? nc / nt : nt / nc; float ddn = vecDot(hDir, ray.dir); float cos2t = 1.f - nnt * nnt *(1.f - ddn * ddn); //if (cos2t < 0.f) //{ // reflProb = 1.0f;// refrProb = 0.f; //} //else { refrDir = normalize(ray.dir * nnt - hDir * (ddn*nnt + sqrtf(cos2t))); } } if (reflProb > 0) { reflDir = normalize(ray.dir - hDir * 2 * vecDot(hDir, ray.dir)); if (vecDot(reflDir, nl) < 0.f) reflProb = 0.f; } // Reflected if (ProbabilityRand(&procVertex->randState, reflProb)) { nextRay = CURay(ray.orig + (traceResult.dist - M_FLT_BIAS_EPSILON) * ray.dir, reflDir); // ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(nextRay, vertices, triangles, materials, textures, randstate); // Microfacet specular = D*G*F / (4*NoL*NoV) // pdf = D * NoH / (4 * VoH) // (G * F * VoH) / (NoV * NoH) float VoH = vecDot(-1 * ray.dir, hDir); float NoV = vecDot(nl, -1 * ray.dir); float NoH = vecDot(nl, hDir); float NoL = vecDot(nl, reflDir); float G = GeometricVisibility(roughness, NoV, NoL, VoH); //shadeResult = vecMul((brdf_f * G * VoH) / (NoV * NoH * reflProb) , nextRayResult.light) + emissive; lightMulTerm = (brdf_f * G * VoH) / (NoV * NoH * reflProb); nextRayType = RAYTYPE_SPEC; } // Diffused or Transmited else { // Transmited if (ProbabilityRand(&procVertex->randState, refrProb)) { nextRay = CURay(ray.orig + (traceResult.dist + M_FLT_BIAS_EPSILON) * ray.dir + refrDir * M_FLT_BIAS_EPSILON, refrDir); //ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(nextRay, vertices, triangles, materials, textures, randstate); float cosine = vecDot(-1 * nl, refrDir); //shadeResult = (cosine * vecMul(diff, nextRayResult.light)) / (refrProb * (1 - reflProb)) + emissive; lightMulTerm = cosine * diff / (refrProb * (1 - reflProb)); nextRayType = RAYTYPE_SPEC; } // Diffused else { float3 w = nl; float3 u = normalize(vecCross((fabs(w.x) > .1 ? 
make_float3(0, 1, 0) : make_float3(1, 0, 0)), w)); float3 v = vecCross(w, u); u = vecCross(v, w); float r1 = 2.f * M_PI * hiprand_uniform(&procVertex->randState); float r2cos = sqrtf(hiprand_uniform(&procVertex->randState)); float r2sin = 1.f - r2cos*r2cos; float3 diffDir = normalize(w * r2cos + u * r2sin * cosf(r1) + v * r2sin * sinf(r1)); nextRay = CURay(ray.orig + traceResult.dist * ray.dir + diffDir * M_FLT_BIAS_EPSILON, diffDir); //ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(nextRay, vertices, triangles, materials, textures, randstate); float VoH = vecDot(-1 * ray.dir, hDir); float NoV = vecDot(nl, -1 * ray.dir); float NoL = vecDot(nl, diffDir); //shadeResult = (M_PI * vecMul(Diffuse(diff, roughness, NoV, NoL, VoH), nextRayResult.light)) / ((1 - refrProb) * (1 - reflProb)) + emissive; lightMulTerm = M_PI * Diffuse(diff, roughness, NoV, NoL, VoH) / ((1 - refrProb) * (1 - reflProb)); nextRayType = RAYTYPE_DIFF; } } procVertex->pathSample = (vecDot(nl, norm) < 0.f || vecDot(nl, -1 * ray.dir) < mat->directivity) ? procVertex->pathSample : procVertex->pathSample + (vecMul(emissive, procVertex->pathOutMulTerm)).x; float pixelContrib = length(procVertex->pathOutMulTerm) * length(lightMulTerm); if (/*(procVertex->pathType == RAYTYPE_DIFF && nextRayType == RAYTYPE_SPEC) ||*/ length(emissive) > 0.f) pixelContrib = 0.f; if (hiprand_uniform(&procVertex->randState) > pixelContrib || procVertex->pathSampleDepth + 1 >= NORMALRAY_BOUND_MAX) { procVertex->isTerminated = true; } else { procVertex->pathOutMulTerm = vecMul(procVertex->pathOutMulTerm, lightMulTerm); procVertex->pathOutDir = nextRay.dir; procVertex->pathVertexPos = nextRay.orig; procVertex->pathType = nextRayType; procVertex->pathSampleDepth++; } } } else { procVertex->isTerminated = true; } } __global__ void pt_genPathQueue_kernel(float3 camPos, float3 camDir, float3 camUp, float3 camRight, float fov, float width, float height, uint32 frameN, uint32 hashedFrameN, uint32 sampleSpecN, PTPathVertex* pathQueue) { uint x = blockIdx.x * blockDim.x + threadIdx.x; uint y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) return; uint ind = (y * width + x); float u = (2.f * ((float)x + 0.5f) / width - 1.f) * tan(fov * 0.5f) * width / height; float v = (2.f * ((float)y + 0.5f) / height - 1.f) * tan(fov * 0.5f); hiprandState_t randstate; hiprand_init(hashedFrameN + ind, 0, 0, &randstate); float au = u + (hiprand_uniform(&randstate) - 0.5f) / height * tan(fov * 0.5f); float av = v + (hiprand_uniform(&randstate) - 0.5f) / height * tan(fov * 0.5f); float3 dir = normalize(camRight * au + camUp * av + camDir); uint waveInd = hiprand_uniform(&randstate) * (float)(sampleSpecN - 1); pathQueue[ind] = PTPathVertex(false, make_uint2(x, y), dir, camPos, RAYTYPE_EYE, randstate, waveInd); } __global__ void pt_assignPathStream_kernel(PTPathVertex** pathStream, uint pathStreamSize , PTPathVertex* pathQueue, uint pathQueueCur, uint pathQueueSize) { uint ind = blockIdx.x * blockDim.x + threadIdx.x; if (ind >= pathStreamSize) { int pathQueueInd = pathQueueCur + ind - pathStreamSize; PTPathVertex* assignSample = nullptr; if (pathQueueInd < pathQueueSize) { assignSample = &pathQueue[pathQueueInd]; } pathStream[ind] = assignSample; } } __global__ void pt_applyPathQueueResult_kernel(PTPathVertex* pathQueue, uint pathQueueSize , uint width, uint height, uint frameN, float* result, uint32 specSampleN, uint* sampleResultN) { uint x = blockIdx.x * blockDim.x + threadIdx.x; if (x >= pathQueueSize) return; uint ind = pathQueue[x].pathPixel.y * 
width + pathQueue[x].pathPixel.x; uint specInd = ind * specSampleN + pathQueue[x].pathWaveInd; if (!frameN) { for (uint i = 0; i < specSampleN; i++) { sampleResultN[ind * specSampleN + i] = 0; result[ind * specSampleN + i] = 0.f; } } uint tempNextSampleResultN = sampleResultN[specInd] + 1; if (tempNextSampleResultN > sampleResultN[specInd]) { float sampleResult = pathQueue[x].pathSample; float resultInf = 1.f / (float)(tempNextSampleResultN); float oldInf = sampleResultN[specInd] * resultInf; result[specInd] = max(resultInf * sampleResult + oldInf * result[specInd], 0.f); sampleResultN[specInd] = tempNextSampleResultN; } } __global__ void pt_convertSpecToRGB_kernel(float* result, uint specSampleN, uint width, uint height, float* rgbResult , NPCudaSpecHelper::Spectrum* baseSpec, float baseSpecIntY) { uint x = blockIdx.x * blockDim.x + threadIdx.x; if (x >= width * height) return; NPCudaSpecHelper::Spectrum spec(&result[x * specSampleN]); //for (uint i = 0; i < specSampleN; i++) //{ // spec.SetData(i,NPCudaSpecHelper::RGBToSPDAtInd(1.0f,0.f,0.f, i // , baseSpec[0].GetData(), baseSpec[1].GetData(), baseSpec[2].GetData(), baseSpecIntY)); //} float3 color; spec.GetRGB(color.x, color.y, color.z, baseSpec, baseSpecIntY); rgbResult[x * 3] = color.x; rgbResult[x * 3 + 1] = color.y; rgbResult[x * 3 + 2] = color.z; } void CleanMem() { freeStreamMem(); freeAllBVHCudaMem(); CUFREE(g_devSampleResultN); CUFREE(g_devResultData); CUFREE(g_devRGBResultData); //CUFREE(g_devAccResultData); } //struct ray_greater_compare //{ // __hd__ bool operator()(const PTPathVertex* vert1, const PTPathVertex* vert2) // { // int vert1Score = (vert1->pathOutDir.x > 0) + (vert1->pathOutDir.y > 0) + (vert1->pathOutDir.z > 0); // int vert2Score = (vert2->pathOutDir.x > 0) + (vert2->pathOutDir.y > 0) + (vert2->pathOutDir.z > 0); // return vert1Score > vert2Score; // } //}; struct is_terminated { __hd__ bool operator()(const PTPathVertex* vert) { return vert->isTerminated; } }; bool Render(NPMathHelper::Vec3 camPos, NPMathHelper::Vec3 camDir, NPMathHelper::Vec3 camUp, float fov, RTScene* scene , float width, float height, float* result) { // Check and allocate everything if (!scene || !scene->GetCompactBVH()->IsValid()) return false; NPMathHelper::Vec3 camRight = camDir.cross(camUp).normalize(); camUp = camRight.cross(camDir).normalize(); g_matLastCamMat = g_matCurCamMat; g_matCurCamMat = NPMathHelper::Mat4x4::lookAt(camPos, camPos + camDir, camUp); g_uCurFrameN = (g_matLastCamMat != g_matCurCamMat) ? 
0 : g_uCurFrameN + 1; if (!g_bIsCudaInit || scene->GetIsCudaDirty()) { CleanMem(); g_matLastCamMat = g_matCurCamMat; g_uCurFrameN = 0; initAllSceneCudaMem(scene); allocateStreamMem(width * height); size_t mem_tot; size_t mem_free; hipMemGetInfo(&mem_free, &mem_tot); std::cout << "Memory Used : " << mem_tot-mem_free << "/" << mem_tot << " -> Free " << mem_free << std::endl; } else if (scene->GetIsCudaMaterialDirty()) { updateAllSceneMaterialsCudaMem(scene); g_uCurFrameN = 0; } if (!g_bIsCudaInit) return false; if (!g_devResultData /*|| !g_devAccResultData*/ || !g_devRGBResultData || g_resultDataSize != (sizeof(float) * 3 * width * height)) { g_resultDataSize = sizeof(float) * 3 * width * height; CUFREE(g_devResultData); hipMalloc((void**)&g_devResultData, sizeof(float) * width * height * NPCudaSpecHelper::c_u32SampleN); CUFREE(g_devRGBResultData); hipMalloc((void**)&g_devRGBResultData, g_resultDataSize); //CUFREE(g_devAccResultData); //hipMalloc((void**)&g_devAccResultData, g_resultDataSize); CUFREE(g_devSampleResultN); hipMalloc((void**)&g_devSampleResultN, sizeof(uint) * width * height * NPCudaSpecHelper::c_u32SampleN); } float3 f3CamPos = V32F3(camPos); float3 f3CamUp = V32F3(camUp); float3 f3CamDir = V32F3(camDir); float3 f3CamRight = V32F3(camRight); // Kernel go here dim3 block1(BLOCK_SIZE*BLOCK_SIZE, 1, 1); dim3 block2(BLOCK_SIZE, BLOCK_SIZE, 1); dim3 renderGrid(ceil(width / (float)block2.x), ceil(height / (float)block2.y), 1); pt_genPathQueue_kernel << < renderGrid, block2 >> > (f3CamPos, f3CamDir, f3CamUp, f3CamRight, fov, width, height , g_uCurFrameN, WangHash(g_uCurFrameN), NPCudaSpecHelper::c_u32SampleN, g_devPathQueue); HANDLE_KERNEL_ERROR(); hipDeviceSynchronize(); uint activePathStreamSize = 0; g_uPathQueueCur = 0; while (g_uPathQueueCur < g_uPathQueueSize || activePathStreamSize > 0) { uint tempActivePathStreamSize = activePathStreamSize; pt_assignPathStream_kernel << < dim3(ceil((float)PATHSTREAM_SIZE / (float)block1.x), 1, 1), block1 >> >(g_devPathStream , activePathStreamSize, g_devPathQueue, g_uPathQueueCur, g_uPathQueueSize); //readjust activePathStreamSize activePathStreamSize = min((uint)PATHSTREAM_SIZE, activePathStreamSize + (g_uPathQueueSize - g_uPathQueueCur)); g_uPathQueueCur += activePathStreamSize - tempActivePathStreamSize; hipDeviceSynchronize(); //test sorting ray for more coherent tracing -> it does not improve performance //thrust::sort(thrust::device, g_devPathStream, g_devPathStream + activePathStreamSize, ray_greater_compare()); pt_traceSample_kernel << < dim3(ceil((float)activePathStreamSize / (float)block1.x), 1, 1), block1 >> > (g_devVertices , g_devTriangles, g_devMaterials, g_devTextures, g_devPathStream, activePathStreamSize , NPCudaSpecHelper::c_u32LamdaStart, NPCudaSpecHelper::c_u32LamdaEnd , NPCudaSpecHelper::c_u32SampleN, NPCudaSpecHelper::g_pDevBaseSpec , NPCudaSpecHelper::g_fBaseSpecIntY); HANDLE_KERNEL_ERROR(); hipDeviceSynchronize(); //compact pathstream and find activePathStreamSize value PTPathVertex** compactedStreamEndItr = thrust::remove_if(thrust::device, g_devPathStream , g_devPathStream + activePathStreamSize, is_terminated()); HANDLE_KERNEL_ERROR(); activePathStreamSize = compactedStreamEndItr - g_devPathStream; } pt_applyPathQueueResult_kernel << < dim3(ceil((float)g_uPathQueueSize / (float)block1.x), 1, 1), block1 >> >(g_devPathQueue , g_uPathQueueSize, width, height, g_uCurFrameN, g_devResultData, NPCudaSpecHelper::c_u32SampleN , g_devSampleResultN); HANDLE_KERNEL_ERROR(); pt_convertSpecToRGB_kernel << < dim3(ceil((float)(width 
* height) / (float)block1.x), 1, 1), block1 >> >(g_devResultData , NPCudaSpecHelper::c_u32SampleN, width, height, g_devRGBResultData, NPCudaSpecHelper::g_pDevBaseSpec , NPCudaSpecHelper::g_fBaseSpecIntY); HANDLE_KERNEL_ERROR(); // Copy result to host hipMemcpy(result, g_devRGBResultData, g_resultDataSize, hipMemcpyDeviceToHost); return true; } } #endif
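// --- Editor's note (not part of the original HIP/CUDA pair) ---------------------------------
// The streaming path tracer above keeps an array of PTPathVertex* (the "path stream") and,
// after each bounce, compacts away terminated paths with thrust::remove_if before relaunching
// the trace kernel. The standalone sketch below isolates just that compaction step under
// simplified, assumed names (PathRec / isDead stand in for PTPathVertex / is_terminated);
// it illustrates the pattern and is not code taken from either file. Compile with nvcc.
#include <thrust/device_vector.h>
#include <thrust/remove.h>
#include <thrust/execution_policy.h>
#include <cstdio>

struct PathRec { int terminated; };

struct isDead {
    __host__ __device__ bool operator()(const PathRec* p) const { return p->terminated != 0; }
};

int main() {
    const int n = 8;
    thrust::device_vector<PathRec>  recs(n);     // the path vertices themselves
    thrust::device_vector<PathRec*> stream(n);   // the "path stream" of pointers into recs
    PathRec* base = thrust::raw_pointer_cast(recs.data());
    for (int i = 0; i < n; ++i) {
        recs[i]   = PathRec{ i % 2 };            // mark every other path as terminated
        stream[i] = base + i;
    }
    // Compact the stream: keep only pointers to still-active paths, as the render loop above
    // does with g_devPathStream; the new active count is the distance to the returned end.
    PathRec** first  = thrust::raw_pointer_cast(stream.data());
    PathRec** newEnd = thrust::remove_if(thrust::device, first, first + n, isDead());
    printf("active paths after compaction: %d\n", (int)(newEnd - first));   // expected: 4
    return 0;
}
// --------------------------------------------------------------------------------------------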
fa86b4d17f66877cde9ac827dc8a3849e4b6e27e.cu
#include "cudaRTCommon.h" #ifdef FULLSPECTRAL #include "cudaspechelper.h" #include <thrust/remove.h> #include <thrust/execution_policy.h> #define BLOCK_SIZE 16 #define NORMALRAY_BOUND_MAX 10 #define PATHSTREAM_SIZE 1E4*64 namespace cudaRTPTStreamSpec { RT_ATTRIBS_N(0) RT_ATTRIBS_BGN RT_ATTRIBS_END enum RAYTYPE { RAYTYPE_EYE = 0, RAYTYPE_DIFF = 1, RAYTYPE_SPEC = 2 }; struct PTPathVertex { uint isTerminated; uint2 pathPixel; float3 pathOutDir; float3 pathVertexPos; float3 pathOutMulTerm; RAYTYPE pathType; float pathSample; uint pathWaveInd; uint pathSampleDepth; curandState randState; __device__ PTPathVertex(uint _isTerminated, uint2 _pathPixel, float3 _pathOutDir, float3 _pathVertexPos , RAYTYPE _pathType, curandState _randState, uint waveInd) : isTerminated(_isTerminated) , pathPixel(_pathPixel) , pathOutDir(_pathOutDir) , pathVertexPos(_pathVertexPos) , pathOutMulTerm(make_float3(1.f,1.f,1.f)) , pathType(_pathType) , pathSample(0.f) , pathWaveInd(waveInd) , pathSampleDepth(0) , randState(_randState) {} }; PTPathVertex* g_devPathQueue = nullptr; uint g_uPathQueueCur = 0; uint g_uPathQueueSize = 0; PTPathVertex** g_devPathStream = nullptr; uint g_uPathStreamSize = PATHSTREAM_SIZE; void freeStreamMem() { NPCudaSpecHelper::ClearBaseSpectrum(); g_uPathQueueCur = g_uPathQueueSize = 0; CUFREE(g_devPathQueue); CUFREE(g_devPathStream); } void allocateStreamMem(uint queueSize = 480000) { g_uPathQueueSize = queueSize; HANDLE_ERROR(cudaMalloc((void**)&g_devPathQueue, sizeof(PTPathVertex) * g_uPathQueueSize)); HANDLE_ERROR(cudaMemset((void*)g_devPathQueue, 0, sizeof(PTPathVertex) * g_uPathQueueSize)); HANDLE_ERROR(cudaMalloc((void**)&g_devPathStream, sizeof(PTPathVertex*) * g_uPathStreamSize)); HANDLE_ERROR(cudaMemset((void*)g_devPathStream, 0, sizeof(PTPathVertex*) * g_uPathStreamSize)); NPCudaSpecHelper::InitBaseSpectrum(); } float* g_devResultData = nullptr; float* g_devRGBResultData = nullptr; //float* g_devAccResultData = nullptr; uint* g_devSampleResultN = nullptr; NPMathHelper::Mat4x4 g_matLastCamMat; NPMathHelper::Mat4x4 g_matCurCamMat; uint32 g_uCurFrameN = 0; size_t g_resultDataSize = 0; uint32 WangHash(uint32 a) { a = (a ^ 61) ^ (a >> 16); a = a + (a << 3); a = a ^ (a >> 4); a = a * 0x27d4eb2d; a = a ^ (a >> 15); return a; } __global__ void pt_traceSample_kernel(RTVertex* vertices, RTTriangle* triangles, RTMaterial* materials , CURTTexture* textures, PTPathVertex** pathStream, uint activePathStreamSize , uint32 lambdaStart, uint32 lambdaEnd, uint32 specSampleN , NPCudaSpecHelper::Spectrum* baseSpec, float baseSpecIntY) { uint x = blockIdx.x * blockDim.x + threadIdx.x; if (x >= activePathStreamSize || pathStream[x]->isTerminated) return; PTPathVertex* procVertex = pathStream[x]; CURay ray = CURay(procVertex->pathVertexPos, procVertex->pathOutDir); TracePrimitiveResult traceResult; if (TracePrimitive(ray, traceResult, M_INF, M_FLT_BIAS_EPSILON, false)) { RTTriangle* tri = &triangles[traceResult.triId]; RTMaterial* mat = &materials[tri->matInd]; RTVertex* v0 = &vertices[tri->vertInd0]; RTVertex* v1 = &vertices[tri->vertInd1]; RTVertex* v2 = &vertices[tri->vertInd2]; float2 uv0 = make_float2(v0->tex._x, v0->tex._y); float2 uv1 = make_float2(v1->tex._x, v1->tex._y); float2 uv2 = make_float2(v2->tex._x, v2->tex._y); float2 uv = uv0 * traceResult.w + uv1 * traceResult.u + uv2 * traceResult.v; float3 n0 = V32F3(v0->norm); float3 n1 = V32F3(v1->norm); float3 n2 = V32F3(v2->norm); float3 norm = n0 * traceResult.w + n1 * traceResult.u + n2 * traceResult.v; float3 diff; float3 emissive; float trans; 
float specular; float metallic; float roughness; float anisotropic; float sheen; float sheenTint; float clearcoat; float clearcoatGloss; GetMaterialColors(mat, uv, textures, diff, norm, emissive, trans, specular, metallic, roughness , anisotropic, sheen, sheenTint, clearcoat, clearcoatGloss); float3 nl = vecDot(norm, ray.dir) < 0.f ? norm : -1 * norm; { uint waveInd = procVertex->pathWaveInd; float waveSpecData = mat->specPara[waveInd*specSampleN + waveInd]; if (length(emissive) > 0.f) { emissive = emissive * waveSpecData; } else { diff = make_float3(waveSpecData, waveSpecData, waveSpecData); } // Get some random microfacet float3 hDir = ImportanceSampleGGX(make_float2(curand_uniform(&procVertex->randState), curand_uniform(&procVertex->randState)), roughness, nl); // Calculate flesnel float voH = vecDot(-1 * ray.dir, hDir); float3 f0 = vecLerp(0.08 * make_float3(specular, specular, specular), diff, metallic); float3 brdf_f = Fresnel(f0, voH); // Reflected or Refracted float reflProb = lerp(length(brdf_f), 1.0f, metallic); float refrProb = trans; float3 reflDir; float3 refrDir; CURay nextRay = ray; float3 lightMulTerm; RAYTYPE nextRayType = procVertex->pathType; if (refrProb > 0) { bool into = vecDot(nl, norm) > 0.f; //float wavelength = lambdaStart + (float)waveInd * ((float)lambdaEnd - (float)lambdaStart) / (float)specSampleN; //float sqMicWavelength = (0.001f * wavelength) * (0.001f * wavelength); float nt = specular * 0.8f + 1.f; if (mat->isUseSpecIOR) { nt = mat->specIOR[waveInd]; } float nc = 1.0f; float nnt = into ? nc / nt : nt / nc; float ddn = vecDot(hDir, ray.dir); float cos2t = 1.f - nnt * nnt *(1.f - ddn * ddn); //if (cos2t < 0.f) //{ // reflProb = 1.0f;// refrProb = 0.f; //} //else { refrDir = normalize(ray.dir * nnt - hDir * (ddn*nnt + sqrtf(cos2t))); } } if (reflProb > 0) { reflDir = normalize(ray.dir - hDir * 2 * vecDot(hDir, ray.dir)); if (vecDot(reflDir, nl) < 0.f) reflProb = 0.f; } // Reflected if (ProbabilityRand(&procVertex->randState, reflProb)) { nextRay = CURay(ray.orig + (traceResult.dist - M_FLT_BIAS_EPSILON) * ray.dir, reflDir); // ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(nextRay, vertices, triangles, materials, textures, randstate); // Microfacet specular = D*G*F / (4*NoL*NoV) // pdf = D * NoH / (4 * VoH) // (G * F * VoH) / (NoV * NoH) float VoH = vecDot(-1 * ray.dir, hDir); float NoV = vecDot(nl, -1 * ray.dir); float NoH = vecDot(nl, hDir); float NoL = vecDot(nl, reflDir); float G = GeometricVisibility(roughness, NoV, NoL, VoH); //shadeResult = vecMul((brdf_f * G * VoH) / (NoV * NoH * reflProb) , nextRayResult.light) + emissive; lightMulTerm = (brdf_f * G * VoH) / (NoV * NoH * reflProb); nextRayType = RAYTYPE_SPEC; } // Diffused or Transmited else { // Transmited if (ProbabilityRand(&procVertex->randState, refrProb)) { nextRay = CURay(ray.orig + (traceResult.dist + M_FLT_BIAS_EPSILON) * ray.dir + refrDir * M_FLT_BIAS_EPSILON, refrDir); //ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(nextRay, vertices, triangles, materials, textures, randstate); float cosine = vecDot(-1 * nl, refrDir); //shadeResult = (cosine * vecMul(diff, nextRayResult.light)) / (refrProb * (1 - reflProb)) + emissive; lightMulTerm = cosine * diff / (refrProb * (1 - reflProb)); nextRayType = RAYTYPE_SPEC; } // Diffused else { float3 w = nl; float3 u = normalize(vecCross((fabs(w.x) > .1 ? 
make_float3(0, 1, 0) : make_float3(1, 0, 0)), w)); float3 v = vecCross(w, u); u = vecCross(v, w); float r1 = 2.f * M_PI * curand_uniform(&procVertex->randState); float r2cos = sqrtf(curand_uniform(&procVertex->randState)); float r2sin = 1.f - r2cos*r2cos; float3 diffDir = normalize(w * r2cos + u * r2sin * cosf(r1) + v * r2sin * sinf(r1)); nextRay = CURay(ray.orig + traceResult.dist * ray.dir + diffDir * M_FLT_BIAS_EPSILON, diffDir); //ShootRayResult nextRayResult = pt0_normalRay<depth + 1>(nextRay, vertices, triangles, materials, textures, randstate); float VoH = vecDot(-1 * ray.dir, hDir); float NoV = vecDot(nl, -1 * ray.dir); float NoL = vecDot(nl, diffDir); //shadeResult = (M_PI * vecMul(Diffuse(diff, roughness, NoV, NoL, VoH), nextRayResult.light)) / ((1 - refrProb) * (1 - reflProb)) + emissive; lightMulTerm = M_PI * Diffuse(diff, roughness, NoV, NoL, VoH) / ((1 - refrProb) * (1 - reflProb)); nextRayType = RAYTYPE_DIFF; } } procVertex->pathSample = (vecDot(nl, norm) < 0.f || vecDot(nl, -1 * ray.dir) < mat->directivity) ? procVertex->pathSample : procVertex->pathSample + (vecMul(emissive, procVertex->pathOutMulTerm)).x; float pixelContrib = length(procVertex->pathOutMulTerm) * length(lightMulTerm); if (/*(procVertex->pathType == RAYTYPE_DIFF && nextRayType == RAYTYPE_SPEC) ||*/ length(emissive) > 0.f) pixelContrib = 0.f; if (curand_uniform(&procVertex->randState) > pixelContrib || procVertex->pathSampleDepth + 1 >= NORMALRAY_BOUND_MAX) { procVertex->isTerminated = true; } else { procVertex->pathOutMulTerm = vecMul(procVertex->pathOutMulTerm, lightMulTerm); procVertex->pathOutDir = nextRay.dir; procVertex->pathVertexPos = nextRay.orig; procVertex->pathType = nextRayType; procVertex->pathSampleDepth++; } } } else { procVertex->isTerminated = true; } } __global__ void pt_genPathQueue_kernel(float3 camPos, float3 camDir, float3 camUp, float3 camRight, float fov, float width, float height, uint32 frameN, uint32 hashedFrameN, uint32 sampleSpecN, PTPathVertex* pathQueue) { uint x = blockIdx.x * blockDim.x + threadIdx.x; uint y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) return; uint ind = (y * width + x); float u = (2.f * ((float)x + 0.5f) / width - 1.f) * tan(fov * 0.5f) * width / height; float v = (2.f * ((float)y + 0.5f) / height - 1.f) * tan(fov * 0.5f); curandState randstate; curand_init(hashedFrameN + ind, 0, 0, &randstate); float au = u + (curand_uniform(&randstate) - 0.5f) / height * tan(fov * 0.5f); float av = v + (curand_uniform(&randstate) - 0.5f) / height * tan(fov * 0.5f); float3 dir = normalize(camRight * au + camUp * av + camDir); uint waveInd = curand_uniform(&randstate) * (float)(sampleSpecN - 1); pathQueue[ind] = PTPathVertex(false, make_uint2(x, y), dir, camPos, RAYTYPE_EYE, randstate, waveInd); } __global__ void pt_assignPathStream_kernel(PTPathVertex** pathStream, uint pathStreamSize , PTPathVertex* pathQueue, uint pathQueueCur, uint pathQueueSize) { uint ind = blockIdx.x * blockDim.x + threadIdx.x; if (ind >= pathStreamSize) { int pathQueueInd = pathQueueCur + ind - pathStreamSize; PTPathVertex* assignSample = nullptr; if (pathQueueInd < pathQueueSize) { assignSample = &pathQueue[pathQueueInd]; } pathStream[ind] = assignSample; } } __global__ void pt_applyPathQueueResult_kernel(PTPathVertex* pathQueue, uint pathQueueSize , uint width, uint height, uint frameN, float* result, uint32 specSampleN, uint* sampleResultN) { uint x = blockIdx.x * blockDim.x + threadIdx.x; if (x >= pathQueueSize) return; uint ind = pathQueue[x].pathPixel.y * width + 
pathQueue[x].pathPixel.x; uint specInd = ind * specSampleN + pathQueue[x].pathWaveInd; if (!frameN) { for (uint i = 0; i < specSampleN; i++) { sampleResultN[ind * specSampleN + i] = 0; result[ind * specSampleN + i] = 0.f; } } uint tempNextSampleResultN = sampleResultN[specInd] + 1; if (tempNextSampleResultN > sampleResultN[specInd]) { float sampleResult = pathQueue[x].pathSample; float resultInf = 1.f / (float)(tempNextSampleResultN); float oldInf = sampleResultN[specInd] * resultInf; result[specInd] = max(resultInf * sampleResult + oldInf * result[specInd], 0.f); sampleResultN[specInd] = tempNextSampleResultN; } } __global__ void pt_convertSpecToRGB_kernel(float* result, uint specSampleN, uint width, uint height, float* rgbResult , NPCudaSpecHelper::Spectrum* baseSpec, float baseSpecIntY) { uint x = blockIdx.x * blockDim.x + threadIdx.x; if (x >= width * height) return; NPCudaSpecHelper::Spectrum spec(&result[x * specSampleN]); //for (uint i = 0; i < specSampleN; i++) //{ // spec.SetData(i,NPCudaSpecHelper::RGBToSPDAtInd(1.0f,0.f,0.f, i // , baseSpec[0].GetData(), baseSpec[1].GetData(), baseSpec[2].GetData(), baseSpecIntY)); //} float3 color; spec.GetRGB(color.x, color.y, color.z, baseSpec, baseSpecIntY); rgbResult[x * 3] = color.x; rgbResult[x * 3 + 1] = color.y; rgbResult[x * 3 + 2] = color.z; } void CleanMem() { freeStreamMem(); freeAllBVHCudaMem(); CUFREE(g_devSampleResultN); CUFREE(g_devResultData); CUFREE(g_devRGBResultData); //CUFREE(g_devAccResultData); } //struct ray_greater_compare //{ // __hd__ bool operator()(const PTPathVertex* vert1, const PTPathVertex* vert2) // { // int vert1Score = (vert1->pathOutDir.x > 0) + (vert1->pathOutDir.y > 0) + (vert1->pathOutDir.z > 0); // int vert2Score = (vert2->pathOutDir.x > 0) + (vert2->pathOutDir.y > 0) + (vert2->pathOutDir.z > 0); // return vert1Score > vert2Score; // } //}; struct is_terminated { __hd__ bool operator()(const PTPathVertex* vert) { return vert->isTerminated; } }; bool Render(NPMathHelper::Vec3 camPos, NPMathHelper::Vec3 camDir, NPMathHelper::Vec3 camUp, float fov, RTScene* scene , float width, float height, float* result) { // Check and allocate everything if (!scene || !scene->GetCompactBVH()->IsValid()) return false; NPMathHelper::Vec3 camRight = camDir.cross(camUp).normalize(); camUp = camRight.cross(camDir).normalize(); g_matLastCamMat = g_matCurCamMat; g_matCurCamMat = NPMathHelper::Mat4x4::lookAt(camPos, camPos + camDir, camUp); g_uCurFrameN = (g_matLastCamMat != g_matCurCamMat) ? 
0 : g_uCurFrameN + 1; if (!g_bIsCudaInit || scene->GetIsCudaDirty()) { CleanMem(); g_matLastCamMat = g_matCurCamMat; g_uCurFrameN = 0; initAllSceneCudaMem(scene); allocateStreamMem(width * height); size_t mem_tot; size_t mem_free; cudaMemGetInfo(&mem_free, &mem_tot); std::cout << "Memory Used : " << mem_tot-mem_free << "/" << mem_tot << " -> Free " << mem_free << std::endl; } else if (scene->GetIsCudaMaterialDirty()) { updateAllSceneMaterialsCudaMem(scene); g_uCurFrameN = 0; } if (!g_bIsCudaInit) return false; if (!g_devResultData /*|| !g_devAccResultData*/ || !g_devRGBResultData || g_resultDataSize != (sizeof(float) * 3 * width * height)) { g_resultDataSize = sizeof(float) * 3 * width * height; CUFREE(g_devResultData); cudaMalloc((void**)&g_devResultData, sizeof(float) * width * height * NPCudaSpecHelper::c_u32SampleN); CUFREE(g_devRGBResultData); cudaMalloc((void**)&g_devRGBResultData, g_resultDataSize); //CUFREE(g_devAccResultData); //cudaMalloc((void**)&g_devAccResultData, g_resultDataSize); CUFREE(g_devSampleResultN); cudaMalloc((void**)&g_devSampleResultN, sizeof(uint) * width * height * NPCudaSpecHelper::c_u32SampleN); } float3 f3CamPos = V32F3(camPos); float3 f3CamUp = V32F3(camUp); float3 f3CamDir = V32F3(camDir); float3 f3CamRight = V32F3(camRight); // Kernel go here dim3 block1(BLOCK_SIZE*BLOCK_SIZE, 1, 1); dim3 block2(BLOCK_SIZE, BLOCK_SIZE, 1); dim3 renderGrid(ceil(width / (float)block2.x), ceil(height / (float)block2.y), 1); pt_genPathQueue_kernel << < renderGrid, block2 >> > (f3CamPos, f3CamDir, f3CamUp, f3CamRight, fov, width, height , g_uCurFrameN, WangHash(g_uCurFrameN), NPCudaSpecHelper::c_u32SampleN, g_devPathQueue); HANDLE_KERNEL_ERROR(); cudaDeviceSynchronize(); uint activePathStreamSize = 0; g_uPathQueueCur = 0; while (g_uPathQueueCur < g_uPathQueueSize || activePathStreamSize > 0) { uint tempActivePathStreamSize = activePathStreamSize; pt_assignPathStream_kernel << < dim3(ceil((float)PATHSTREAM_SIZE / (float)block1.x), 1, 1), block1 >> >(g_devPathStream , activePathStreamSize, g_devPathQueue, g_uPathQueueCur, g_uPathQueueSize); //readjust activePathStreamSize activePathStreamSize = min((uint)PATHSTREAM_SIZE, activePathStreamSize + (g_uPathQueueSize - g_uPathQueueCur)); g_uPathQueueCur += activePathStreamSize - tempActivePathStreamSize; cudaDeviceSynchronize(); //test sorting ray for more coherent tracing -> it does not improve performance //thrust::sort(thrust::device, g_devPathStream, g_devPathStream + activePathStreamSize, ray_greater_compare()); pt_traceSample_kernel << < dim3(ceil((float)activePathStreamSize / (float)block1.x), 1, 1), block1 >> > (g_devVertices , g_devTriangles, g_devMaterials, g_devTextures, g_devPathStream, activePathStreamSize , NPCudaSpecHelper::c_u32LamdaStart, NPCudaSpecHelper::c_u32LamdaEnd , NPCudaSpecHelper::c_u32SampleN, NPCudaSpecHelper::g_pDevBaseSpec , NPCudaSpecHelper::g_fBaseSpecIntY); HANDLE_KERNEL_ERROR(); cudaDeviceSynchronize(); //compact pathstream and find activePathStreamSize value PTPathVertex** compactedStreamEndItr = thrust::remove_if(thrust::device, g_devPathStream , g_devPathStream + activePathStreamSize, is_terminated()); HANDLE_KERNEL_ERROR(); activePathStreamSize = compactedStreamEndItr - g_devPathStream; } pt_applyPathQueueResult_kernel << < dim3(ceil((float)g_uPathQueueSize / (float)block1.x), 1, 1), block1 >> >(g_devPathQueue , g_uPathQueueSize, width, height, g_uCurFrameN, g_devResultData, NPCudaSpecHelper::c_u32SampleN , g_devSampleResultN); HANDLE_KERNEL_ERROR(); pt_convertSpecToRGB_kernel << < 
dim3(ceil((float)(width * height) / (float)block1.x), 1, 1), block1 >> >(g_devResultData , NPCudaSpecHelper::c_u32SampleN, width, height, g_devRGBResultData, NPCudaSpecHelper::g_pDevBaseSpec , NPCudaSpecHelper::g_fBaseSpecIntY); HANDLE_KERNEL_ERROR(); // Copy result to host cudaMemcpy(result, g_devRGBResultData, g_resultDataSize, cudaMemcpyDeviceToHost); return true; } } #endif
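// --- Editor's note (not part of the original HIP/CUDA pair) ---------------------------------
// pt_genPathQueue_kernel above seeds one curandState per pixel from WangHash(frameN) plus the
// pixel index, so every pixel and every frame draws from a decorrelated random sequence for
// jittering and Russian roulette. The sketch below is a minimal, assumed reconstruction of just
// that setup (kernel name, image size, and launch shape are illustrative choices); compile with
// nvcc, no extra libraries needed for the cuRAND device API.
#include <cuda_runtime.h>
#include <curand_kernel.h>
#include <cstdio>

__host__ __device__ unsigned int wangHash(unsigned int a) {
    a = (a ^ 61u) ^ (a >> 16);
    a = a + (a << 3);
    a = a ^ (a >> 4);
    a = a * 0x27d4eb2du;
    a = a ^ (a >> 15);
    return a;
}

__global__ void seedPerPixel(curandState* states, float* samples, int width, int height,
                             unsigned int hashedFrameN) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width || y >= height) return;
    int ind = y * width + x;
    curand_init(hashedFrameN + ind, 0, 0, &states[ind]); // independent stream per pixel per frame
    samples[ind] = curand_uniform(&states[ind]);         // e.g. a jitter value in (0, 1]
}

int main() {
    const int width = 16, height = 16, n = width * height;
    curandState* dStates = nullptr;
    float* dSamples = nullptr;
    cudaMalloc(&dStates, n * sizeof(curandState));
    cudaMalloc(&dSamples, n * sizeof(float));
    dim3 block(8, 8), grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y);
    seedPerPixel<<<grid, block>>>(dStates, dSamples, width, height, wangHash(0u));
    cudaDeviceSynchronize();
    float h0 = 0.f;
    cudaMemcpy(&h0, dSamples, sizeof(float), cudaMemcpyDeviceToHost);
    printf("first per-pixel sample = %f\n", h0);
    cudaFree(dStates);
    cudaFree(dSamples);
    return 0;
}
// --------------------------------------------------------------------------------------------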
5088047fe96f0841d8861db36098439ca92b2cf9.hip
// !!! This is a file automatically generated by hipify!!! #include "mex.h" #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <rocblas.h> #include <cusolverDn.h> #include <chrono> #include <hip/hip_runtime_api.h> //mexcuda RSLP.cu -lcublas -lcusolver -lcudart //nvprof --profile-from-start off "C:\Program Files\MATLAB\R2018b\bin\win64\MATLAB.exe" -r RSLPtest #define VERBOSE 1 #define LOG_ERROR 1 FILE *logfile; void LogCudaError(const char* title, hipError_t ret){ if(!LOG_ERROR) return; fprintf(logfile, "%s = %s\n", title, hipGetErrorName(ret)); } void LogCudaSolverError(const char* title, cusolverStatus_t ret){ const char *errorname; if(!LOG_ERROR) return; switch(ret){ case CUSOLVER_STATUS_SUCCESS: errorname = "CUSOLVER_STATUS_SUCCESS"; break; case CUSOLVER_STATUS_NOT_INITIALIZED: errorname = "CUSOLVER_STATUS_NOT_INITIALIZED"; break; case CUSOLVER_STATUS_ALLOC_FAILED: errorname = "CUSOLVER_STATUS_ALLOC_FAILED"; break; case CUSOLVER_STATUS_INVALID_VALUE: errorname = "CUSOLVER_STATUS_INVALID_VALUE"; break; case CUSOLVER_STATUS_ARCH_MISMATCH: errorname = "CUSOLVER_STATUS_ARCH_MISMATCH"; break; case CUSOLVER_STATUS_MAPPING_ERROR: errorname = "CUSOLVER_STATUS_MAPPING_ERROR"; break; case CUSOLVER_STATUS_EXECUTION_FAILED: errorname = "CUSOLVER_STATUS_EXECUTION_FAILED"; break; case CUSOLVER_STATUS_INTERNAL_ERROR: errorname = "CUSOLVER_STATUS_INTERNAL_ERROR"; break; case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED: errorname = "CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED"; break; case CUSOLVER_STATUS_NOT_SUPPORTED: errorname = "CUSOLVER_STATUS_NOT_SUPPORTED"; break; case CUSOLVER_STATUS_ZERO_PIVOT: errorname = "CUSOLVER_STATUS_ZERO_PIVOT"; break; case CUSOLVER_STATUS_INVALID_LICENSE: errorname = "CUSOLVER_STATUS_INVALID_LICENSE"; break; default: errorname = "Unknow error"; } fprintf(logfile, "%s = %s\n", title, errorname); } __global__ void initZeroVector(double *deviceVector, int M) { int i = blockDim.x*blockIdx.x + threadIdx.x; if(i < M) { deviceVector[i] = 0; } } __global__ void initdeviceA(double *deviceA, double * deviceAineq, double * deviceAeq, int Mineq, int Meq, int N){ int k = blockDim.x*blockIdx.x + threadIdx.x; if ( k < Mineq + Meq){ if ( k < Mineq){ for (int i = 0 ; i < N ; i++){ deviceA[ i * (Mineq + Meq) + k] = deviceAineq[i * Mineq + k]; } } else if ( k >= Mineq && k < Mineq + Meq){ for (int i = 0 ; i < N ; i++){ deviceA[ i * (Mineq + Meq) + k] = deviceAineq[i * Meq + k - Mineq]; } } } } __global__ void initIdentityGPU(double *deviceI, int Mineq, int Meq) { int i = blockDim.x*blockIdx.x + threadIdx.x; if(i < Mineq + Meq) { for (int a = 0 ; a < Mineq ; a++){ if(i == a) deviceI[a * Mineq + Meq + i ] = 1.0; else deviceI[a * Mineq + Meq + i ] = 0.0; } } } __global__ void CopyCol(int i, double* deviceVector, double* deviceA, double* deviceI, int basic_number, int N){ int a = blockDim.x*blockIdx.x + threadIdx.x; if( a < basic_number){ if(i < N){ deviceVector[a] = deviceA[basic_number*i+a]; } else{ deviceVector[a] = deviceI[basic_number*(i-N) + a]; } } } __global__ void initart(double *art, int M){ int a = blockDim.x*blockIdx.x + threadIdx.x; if(a < M){ art[a] = -1.0; } } void *LUfactorization( double *deviceA, int M, int *deviceIpiv, double * d_work, int *devInfo, hipsolverDnHandle_t cusolverH, const int ldA ){ //LU factorization hipsolverDnDgetrf(cusolverH, M, M, deviceA, ldA, d_work, deviceIpiv, devInfo); hipDeviceSynchronize(); return 0; } double *SolveLinear(double *deviceA, double *deviceB, int M, int *deviceIpiv, double *d_work, int *devInfo, 
hipsolverDnHandle_t cusolverH, const int ldA, const int ldB, double *output) { //solve hipsolverDnDgetrs(cusolverH, HIPBLAS_OP_N, M, 1, deviceA, ldA, deviceIpiv, deviceB, ldB, devInfo); hipDeviceSynchronize(); hipMemcpy(output, deviceB, sizeof(double) * M, hipMemcpyDeviceToHost); return 0; } void mexFunction( int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[] ){ double *deviceC, *deviceA, *deviceMatrix, *deviceVector, *deviceVectorN, *deviceI, *deviceAeq, *deviceAineq; int *deviceIpiv; double *Aineq, *bineq, *c, *x, *Aeq, *beq; int N, Mineq, Meq; int it = 0 ; double* one = new double [1] ; one[0] = 1.0; double* zero = new double [1]; zero[0] = 0; double value = 0 ; double *d_work = NULL ; int *devInfo = NULL; int lwork; char const * const errId = "parallel:gpu:CuSOLVER:InvalidInput"; char const * const errMsg = "Invalid Input to MEX file."; if( (nrhs != 5) || !mxIsDouble(prhs[0]) || mxGetM(prhs[1]) != mxGetM(prhs[2]) || mxGetM(prhs[3]) != mxGetM(prhs[4])){ mexErrMsgIdAndTxt(errId, errMsg); } logfile = fopen("rslplog.txt","w"); if(VERBOSE) fprintf(logfile, "== RSLP start ==\n"); hipProfilerStart(); //receive data from inputs c =(double *)mxGetData(prhs[0]); Aineq =(double *)mxGetData(prhs[1]); bineq =(double *)mxGetData(prhs[2]); Aeq = (double *)mxGetData(prhs[3]); beq = (double *)mxGetData(prhs[4]); //sizes N = (int)mxGetN(prhs[0]); //column of A (number of variables) Mineq = (int)mxGetM(prhs[1]); //row of A (number of unequalities) Meq = (int)mxGetM(prhs[3]); //row of Aeq (number of equalities) int basic_number = Mineq + Meq; int non_basic_number = N - Meq; fprintf(logfile, "basic number = %d + %d = %d\n", Mineq, Meq, basic_number); fprintf(logfile, "non basic number = %d - %d = %d\n", N, Meq, non_basic_number); const int ldA = basic_number; const int ldB = basic_number; int threadsPerBlock = 512 ; int blocksPerGrid = (basic_number + threadsPerBlock -1)/ threadsPerBlock; double alpha = 1.0; double beta = 0; //Malloc hipMalloc(&deviceMatrix, sizeof(double) * (basic_number) * (basic_number)); hipMalloc(&deviceVector, sizeof(double) * basic_number ); hipMalloc(&deviceVectorN, sizeof(double) * N ); hipMalloc(&deviceC, sizeof(double) * (N+Mineq)); hipMalloc(&deviceI, sizeof(double) * basic_number * Mineq); hipMalloc(&deviceAineq, sizeof(double) * Mineq * N); hipMalloc(&deviceAeq, sizeof(double) * Meq * N); hipMalloc(&deviceA, sizeof(double) * basic_number * N); //Memcpy A hipMemcpy(deviceAineq, Aineq, sizeof(double) * Mineq * N, hipMemcpyHostToDevice); hipMemcpy(deviceAeq, Aeq, sizeof(double) * Meq * N, hipMemcpyHostToDevice); hipLaunchKernelGGL(( initdeviceA), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, deviceA, deviceAineq, deviceAeq, Mineq, Meq, N); double *_A = new double[basic_number * N]; hipMemcpy(_A, deviceA, sizeof(double) * basic_number * N, hipMemcpyDeviceToHost); fprintf(logfile, "== A ==\n"); for(int row = 0 ; row < basic_number ; row++){ for(int col = 0 ; col < N ; col++){ fprintf(logfile, "%9.9f\t", _A[col*basic_number + row]); } fprintf(logfile, "\n"); } //Memcpy C hipLaunchKernelGGL(( initZeroVector), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, deviceC, (Mineq+N)); hipMemcpy(deviceC, c, sizeof(double) * N, hipMemcpyHostToDevice); //init Identity hipLaunchKernelGGL(( initIdentityGPU), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, deviceI, Mineq, Meq); double *I = new double[basic_number * Mineq]; hipMemcpy(I, deviceI, sizeof(double) * basic_number * Mineq, hipMemcpyDeviceToHost); fprintf(logfile, "== I ==\n"); for(int row = 0 ; row < basic_number ; 
row++){ for(int col = 0 ; col < Mineq ; col++){ fprintf(logfile, "%9.9f\t", I[col*basic_number + row]); } fprintf(logfile, "\n"); } //create outputs plhs[0] = mxCreateDoubleScalar(value); plhs[1] = mxCreateNumericMatrix(N, 1, mxDOUBLE_CLASS, mxREAL); x = ( double *)mxGetData(plhs[1]); //create DeviceInformation and Permution Identity Vector hipMalloc(&devInfo, sizeof(double)); hipMalloc(&deviceIpiv, sizeof(int) * basic_number); //setting Handle variable hipblasHandle_t handle; hipStream_t stream = NULL; hipsolverDnHandle_t cusolverH = NULL ; hipsolverDnCreate(&cusolverH); hipStreamCreateWithFlags(&stream, hipStreamNonBlocking); hipsolverDnSetStream(cusolverH, stream); hipblasCreate(&handle); //get local work size hipsolverDnDgetrf_bufferSize(cusolverH, basic_number, basic_number, deviceMatrix, ldA, &lwork); hipMalloc((void**)&d_work, sizeof(double)*lwork); //tic auto start = std::chrono::high_resolution_clock::now(); //create all variables for RSLP //define basic_index int *basic_index = new int[basic_number]; for(int i = 0 ; i < basic_number ; i++){ basic_index[i] = N - Meq + i; } //fprintf(logfile, "=== basic_index ===\n"); //for(int col = 0 ; col < basic_number ; col++){ // fprintf(logfile, "%d\t", basic_index[col]); //} //fprintf(logfile, "\n"); //define non_basic_index int *non_basic_index = new int[non_basic_number + 1]; for(int i = 0; i < non_basic_number + 1 ; i++){ non_basic_index[i] = i; } //fprintf(logfile, "=== non_basic_index ===\n"); //for(int col = 0 ; col < non_basic_number ; col++){ // fprintf(logfile, "%d\t", non_basic_index[col]); //} //fprintf(logfile, "\n"); //define B double *B = new double[basic_number * basic_number]; //define c_basic double* c_basic = new double[basic_number]; hipMalloc(&c_basic, sizeof(double) * basic_number ); for (int i = 0 ; i < basic_number ; i++){ hipLaunchKernelGGL(( CopyCol), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, basic_index[i], deviceVector, deviceA, deviceI, basic_number, N); hipMemcpy(B + i * basic_number , deviceVector, sizeof(double) * basic_number, hipMemcpyDeviceToHost); hipMemcpy(c_basic + i , deviceC + basic_index[i], sizeof(double), hipMemcpyDeviceToDevice); } //fprintf(logfile, "== B ==\n"); //for(int row = 0 ; row < basic_number ; row++){ // for(int col = 0 ; col < basic_number ; col++){ // fprintf(logfile, "%9.9f\t", B[col*basic_number + row]); // } // fprintf(logfile, "\n"); //} //define x_B double *x_B = new double[basic_number]; //define P and min_P double *P = new double[basic_number]; double *min_P = new double[basic_number]; //define c_d double c_d = 0 ; double min_c_d ; //define unbound and infeasible condition bool unbound = false; bool infeasible = false; int leave_index, enter_index; //check initial basic solution bool phaseI = false ; hipMemcpy(deviceMatrix, B, sizeof(double) * basic_number * basic_number, hipMemcpyHostToDevice); hipMemcpy(deviceVector, bineq, sizeof(double) * Mineq , hipMemcpyHostToDevice); hipMemcpy(deviceVector + Mineq, beq, sizeof(double) * Meq , hipMemcpyHostToDevice); LUfactorization(deviceMatrix, basic_number, deviceIpiv, d_work, devInfo, cusolverH, ldA); //calculate x_B = B/b SolveLinear(deviceMatrix, deviceVector, basic_number, deviceIpiv, d_work, devInfo, cusolverH, ldA, ldB, x_B); double min_x_B = 999999999999999999999999999999999.0; for ( int i = 0 ; i < basic_number ; i++){ if ( x_B[i] < min_x_B ){ min_x_B = x_B[i]; leave_index = i; //fprintf(logfile, "min ratio_%d = %9.9f\n", i, ratio); } } //fprintf(logfile, "== x_B ==\n"); //for(int row = 0 ; row < basic_number ; row++){ 
// fprintf(logfile, "%9.9f\n", x_B[row]); //} //fprintf(logfile, "\n"); //artificial column double* art = new double[basic_number]; if( min_x_B < 0){ phaseI = true; fprintf(logfile, "== phase I ==\n"); int a = basic_index[leave_index]; fprintf(logfile, "a = %d\n", a); //fprintf(logfile, "a = %d\n", a); basic_index[leave_index] = int(basic_number + non_basic_number + 1); for (int i = 0 ; i < basic_number ; i++ ){ if (basic_index[i] == basic_number + non_basic_number + 1){ hipMemcpy(c_basic + i, one, sizeof(double), hipMemcpyHostToDevice); } else{ hipMemcpy(c_basic + i, zero, sizeof(double), hipMemcpyHostToDevice); } } non_basic_index[non_basic_number] = a; hipMalloc(&art, sizeof(double) * basic_number); hipLaunchKernelGGL(( initart), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, art, basic_number); hipMemcpy(deviceMatrix, B, sizeof(double) * basic_number * basic_number, hipMemcpyHostToDevice); hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, basic_number, 1, basic_number, &alpha, deviceMatrix, basic_number, art, basic_number, &beta, deviceVector, basic_number); hipMemcpy(art , deviceVector, sizeof(double) *basic_number, hipMemcpyDeviceToDevice); //double* host_art = new double[basic_number]; //hipMemcpy(host_art , art , sizeof(double) *basic_number, hipMemcpyDeviceToHost); // //fprintf(logfile, "== host_art ==\n"); //for (int i = 0; i < basic_number ;i++){ // fprintf(logfile, "%9.9f\n", host_art[i]); //} //fprintf(logfile, "\n"); hipMemcpy(B+basic_number*leave_index, art, sizeof(double) * basic_number, hipMemcpyDeviceToHost); //fprintf(logfile, "== basic variable ==\n"); //for (int i = 0; i < basic_number ;i++){ // fprintf(logfile, "%d\t", basic_index[i]); //} //fprintf(logfile, "\n"); //fprintf(logfile, "== non-basic variable ==\n"); //for (int i = 0; i <non_basic_number + 1 ;i++){ // fprintf(logfile, "%d\t", non_basic_index[i]); //} //fprintf(logfile, "\n"); //double* host_c_basic = new double[basic_number]; //hipMemcpy(host_c_basic, c_basic, sizeof(double) * basic_number, hipMemcpyDeviceToHost); //fprintf(logfile, "== c_basic ==\n"); //for(int row = 0 ; row < basic_number ; row++){ // fprintf(logfile, "%9.9f\t", host_c_basic[row]); //} //fprintf(logfile, "\n"); } while (phaseI == true){ fprintf(logfile, "== iteration %d ==\n",it); hipMemcpy(deviceMatrix, B, sizeof(double) * basic_number * basic_number, hipMemcpyHostToDevice); hipMemcpy(deviceVector, bineq, sizeof(double) * Mineq , hipMemcpyHostToDevice); hipMemcpy(deviceVector + Mineq, beq, sizeof(double) * Meq , hipMemcpyHostToDevice); LUfactorization(deviceMatrix, basic_number, deviceIpiv, d_work, devInfo, cusolverH, ldA); SolveLinear(deviceMatrix, deviceVector, basic_number, deviceIpiv, d_work, devInfo, cusolverH, ldA, ldB, x_B); fprintf(logfile, "== x_B ==\n"); for(int row = 0 ; row < basic_number ; row++){ fprintf(logfile, "%9.9f\n", x_B[+ row]); } fprintf(logfile, "\n"); min_c_d = 9999999999999999999999999999999999999999999999.0 ; //hipMemcpy(host_c_basic, c_basic, sizeof(double) * M, hipMemcpyDeviceToHost); //fprintf(logfile, "== c_basic ==\n"); //for(int row = 0 ; row < M ; row++){ // for(int col = 0 ; col < 1 ; col++){ // fprintf(logfile, "%9.9f\t", host_c_basic[col*M + row]); // } // fprintf(logfile, "\n"); //} //find entering index for(int i = 0 ; i < non_basic_number+1 ; i++){ // === calculate P === if ( non_basic_index[i] == non_basic_number+basic_number+1){ hipMemcpy(deviceVector, art, sizeof(double) * basic_number, hipMemcpyDeviceToDevice); SolveLinear(deviceMatrix, deviceVector, basic_number, deviceIpiv, d_work, 
devInfo, cusolverH, ldA, ldB, P); //calculate c_d(i) hipblasDdot (handle, basic_number, deviceVector, 1, c_basic, 1, &value); c_d = 1.0 - value ; fprintf(logfile, "c_d_%d = %4.4f - %4.4f = %4.4f \n", i, 1.0, value, c_d); } else{ hipLaunchKernelGGL(( CopyCol), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, non_basic_index[i], deviceVector, deviceA, deviceI, basic_number, N); SolveLinear(deviceMatrix, deviceVector, basic_number, deviceIpiv, d_work, devInfo, cusolverH, ldA, ldB, P); //calculate c_d(i) hipblasDdot (handle, basic_number, deviceVector, 1, c_basic, 1, &value); fprintf(logfile, "c_basic*P = %4.4f \n", value); c_d = 0.0 - value ; fprintf(logfile, "c_d_%d = %4.4f - %4.4f = %4.4f \n", i, 0.0, value, c_d); } fprintf(logfile, "== P ==\n"); for(int row = 0 ; row < basic_number ; row++){ for(int col = 0 ; col < 1 ; col++){ fprintf(logfile, "%9.9f\t", P[col*basic_number + row]); } fprintf(logfile, "\n"); } if (c_d <= min_c_d){ min_c_d = c_d; hipMemcpy(min_P, P, sizeof(double) * basic_number, hipMemcpyHostToHost); enter_index = i; } } //fprintf(logfile, "min_c_d = %9.9f\n", min_c_d); //fprintf(logfile, "== min_P ==\n"); //for(int row = 0 ; row < basic_index ; row++){ // for(int col = 0 ; col < 1 ; col++){ // fprintf(logfile, "%9.9f\t", min_P[col*basic_index + row]); // } // fprintf(logfile, "\n"); //} if ( min_c_d >= 0 ){ it++; //fprintf(logfile, "=== basic variable ===\n"); //for(int col = 0 ; col < basic_number ; col++){ // fprintf(logfile, "%d\t", basic_index[col]); //} //fprintf(logfile, "\n"); //fprintf(logfile, "=== non-basic variable ===\n"); //for(int col = 0 ; col < non_basic_number+1 ; col++){ // fprintf(logfile, "%d\t", non_basic_index[col]); //} //fprintf(logfile, "\n"); break; } else { unbound = true; for(int i = 0; i < basic_number ; i++){ if (min_P[i] > 0){ unbound = false; fprintf(logfile, "min_P[%d] > 0\n", i); break; } } if(unbound == true){ break; } double m = 999999999999999999999999999999999.0; int repeat = 0; leave_index = -1; for ( int i = 0 ; i < basic_number ; i++){ if(min_P[i] > 0){ double ratio = x_B[i] / min_P[i]; if ( ratio < m && ratio >= 0){ m = ratio; leave_index = i; repeat = 0; fprintf(logfile, "min ratio_%d = %9.9f\n", i, ratio); } else if (ratio == m){ fprintf(logfile, "ratio_%d = %9.9f, m = %9.9f\n", i, ratio, m); repeat++; } } } if (leave_index == -1){ unbound = true; fprintf(logfile, "leave_index = -1\n"); } if (repeat != 0){ unbound = true; fprintf(logfile, "leave_index is repeated\n"); } if (unbound == true){ break; } //fprintf(logfile, "enter_index = %d\n", non_basic_index[enter_index]); //fprintf(logfile, "leave_index = %d\n", basic_index[leave_index]); //update basic feasible solution int a = basic_index[leave_index]; basic_index[leave_index] = non_basic_index[enter_index]; if (non_basic_index[enter_index] == non_basic_number+basic_number+1){ hipMemcpy(c_basic+leave_index, one, sizeof(double) * 1, hipMemcpyHostToDevice); } else{ hipMemcpy(c_basic+leave_index, zero, sizeof(double) * 1, hipMemcpyHostToDevice); } non_basic_index[enter_index] = a; if (basic_index[leave_index] == non_basic_number+basic_number+1){ hipMemcpy(B+basic_number*leave_index, art, sizeof(double) * basic_number, hipMemcpyDeviceToHost); } else{ hipLaunchKernelGGL(( CopyCol), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, basic_index[leave_index], deviceVector, deviceA, deviceI, basic_number, N); hipMemcpy(B+basic_number*leave_index, deviceVector, sizeof(double) * basic_number, hipMemcpyDeviceToHost); } //fprintf(logfile, "== new B ==\n"); //for(int row = 0 ; row < M ; 
row++){ // for(int col = 0 ; col < M ; col++){ // fprintf(logfile, "%9.9f\t", B[col*M + row]); // } // fprintf(logfile, "\n"); //} //fprintf(logfile, "=== basic variable ===\n"); //for(int col = 0 ; col < M ; col++){ // fprintf(logfile, "%d\t", basic_index[col]); //} //fprintf(logfile, "\n"); //fprintf(logfile, "=== non-basic variable ===\n"); //for(int col = 0 ; col < N+1 ; col++){ // fprintf(logfile, "%d\t", non_basic_index[col]); //} //fprintf(logfile, "\n"); } it++; } min_x_B = 999999999999999999999999999999999.0; for ( int i = 0 ; i < basic_number ; i++){ if ( x_B[i] < min_x_B ){ min_x_B = x_B[i]; //fprintf(logfile, "min ratio_%d = %9.9f\n", i, ratio); } if(basic_index[i] == basic_number + non_basic_number + 1){ infeasible = true; } } if( min_x_B < 0 && phaseI == true){ infeasible = true; } if (infeasible == true){ fprintf(logfile, "=== infeasible solution ===\n"); } else{ infeasible = false; fprintf(logfile, "=== phase II ===\n"); if (phaseI == true){ for (int i = 0; i < basic_number ; i++){ hipMemcpy(c_basic + i, deviceC + basic_index[i], sizeof(double), hipMemcpyDeviceToDevice); } for(int i = 0; i < non_basic_number+1; i++){ if ( non_basic_index[i] == non_basic_number+basic_number+1 ){ for (int j = i+1 ; j < non_basic_number+1 ; j++){ non_basic_index[j-1] = non_basic_index[j]; } break; } } } } //fprintf(logfile, "== new B ==\n"); // for(int row = 0 ; row < basic_number ; row++){ // for(int col = 0 ; col < basic_number ; col++){ // fprintf(logfile, "%9.9f\t", B[col*basic_number + row]); // } // fprintf(logfile, "\n"); // } //fprintf(logfile, "== basic_index ==\n"); //for(int col = 0 ; col < basic_number ; col++){ // fprintf(logfile, "%d\t", basic_index[col]); //} //fprintf(logfile, "\n"); // //fprintf(logfile, "== non_basic_index ==\n"); //for(int col = 0 ; col < non_basic_number+1 ; col++){ // fprintf(logfile, "%d\t", non_basic_index[col]); //} //fprintf(logfile, "\n"); while(infeasible == false){ fprintf(logfile, "== iteration %d ==\n",it); hipMemcpy(deviceMatrix, B, sizeof(double) * basic_number * basic_number, hipMemcpyHostToDevice); hipMemcpy(deviceVector, bineq, sizeof(double) * Mineq , hipMemcpyHostToDevice); hipMemcpy(deviceVector + Mineq, beq, sizeof(double) * Meq , hipMemcpyHostToDevice); LUfactorization(deviceMatrix, basic_number, deviceIpiv, d_work, devInfo, cusolverH, ldA); SolveLinear(deviceMatrix, deviceVector, basic_number, deviceIpiv, d_work, devInfo, cusolverH, ldA, ldB, x_B); fprintf(logfile, "== x_B ==\n"); for(int row = 0 ; row < basic_number ; row++){ fprintf(logfile, "%9.9f\t", x_B[row]); } min_c_d = 9999999999999999999999999999999999999999999999.0 ; //double *host_c_basic = new double[basic_number]; //hipMemcpy(host_c_basic, c_basic, sizeof(double) * basic_number, hipMemcpyDeviceToHost); // //fprintf(logfile, "== c_basic ==\n"); //for(int row = 0 ; row < basic_number ; row++){ // fprintf(logfile, "%9.9f\n", host_c_basic[row]); //} //CopyCol<<<blocksPerGrid, threadsPerBlock>>>(non_basic_index[0], deviceVector, deviceA, deviceI, basic_number, N); //hipMemcpy(host_c_basic, deviceVector, sizeof(double) * basic_number, hipMemcpyDeviceToHost); // //fprintf(logfile, "== cpyCol ==\n"); //for(int row = 0 ; row < basic_number ; row++){ // fprintf(logfile, "%9.9f\n", host_c_basic[row]); //} //find entering index for(int i = 0 ; i < non_basic_number ; i++){ // === calculate P === hipLaunchKernelGGL(( CopyCol), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, non_basic_index[i], deviceVector, deviceA, deviceI, basic_number, N); SolveLinear(deviceMatrix, deviceVector, 
basic_number, deviceIpiv, d_work, devInfo, cusolverH, ldA, ldB, P); //calculate c_d(i) hipblasDdot (handle, basic_number, deviceVector, 1, c_basic, 1, &value); c_d = c[non_basic_index[i]] - value ; //fprintf(logfile, "c_d_%d = %4.4f - %4.4f = %4.4f \n", i, c[non_basic_index[i]], value, c_d); if (c_d <= min_c_d){ min_c_d = c_d; hipMemcpy(min_P, P, sizeof(double) * basic_number, hipMemcpyHostToHost); enter_index = i; //fprintf(logfile, "c_d_%d = %4.4f - %4.4f = %4.4f \n", i, c[non_basic_index[i]], value, c_d); } } fprintf(logfile, "min_c_d = %9.9f\n", min_c_d); fprintf(logfile, "== min_P ==\n"); for(int row = 0 ; row < basic_number ; row++){ for(int col = 0 ; col < 1 ; col++){ fprintf(logfile, "%9.9f\t", min_P[col*basic_number + row]); } fprintf(logfile, "\n"); } if ( min_c_d >= 0 ){ it++; //fprintf(logfile, "=== basic variable ===\n"); //for(int col = 0 ; col < basic_number ; col++){ // fprintf(logfile, "%d\t", basic_index[col]); //} //fprintf(logfile, "\n"); //fprintf(logfile, "=== non-basic variable ===\n"); //for(int col = 0 ; col < non_basic_number ; col++){ // fprintf(logfile, "%d\t", non_basic_index[col]); //} //fprintf(logfile, "\n"); break; } else { unbound = true; for(int i = 0; i < basic_number ; i++){ if (min_P[i] > 0){ unbound = false; fprintf(logfile, "min_P[%d] > 0\n", i); break; } } if(unbound == true){ break; } double m = 999999999999999999999999999999999.0; int repeat = 0; leave_index = -1; for ( int i = 0 ; i < basic_number ; i++){ if(min_P[i] > 0){ double ratio = x_B[i] / min_P[i]; if ( ratio < m && ratio >= 0){ m = ratio; leave_index = i; repeat = 0; fprintf(logfile, "min ratio_%d = %9.9f / %9.9f = %9.9f\n", i, x_B[i], min_P[i], ratio); } else if (ratio == m){ fprintf(logfile, "ratio_%d = %9.9f, m = %9.9f\n", i, ratio, m); repeat++; } } } if (leave_index == -1){ unbound = true; fprintf(logfile, "leave_index = -1\n"); } if (repeat != 0){ unbound = true; fprintf(logfile, "leave_index is repeated\n"); } if (unbound == true){ break; } fprintf(logfile, "enter_index = %d\n", non_basic_index[enter_index]); fprintf(logfile, "leave_index = %d\n", basic_index[leave_index]); //update basic feasible solution int a = basic_index[leave_index]; basic_index[leave_index] = non_basic_index[enter_index]; hipMemcpy(c_basic+leave_index, deviceC + non_basic_index[enter_index], sizeof(double) * 1, hipMemcpyHostToDevice); non_basic_index[enter_index] = a; hipLaunchKernelGGL(( CopyCol), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, basic_index[leave_index], deviceVector, deviceA, deviceI, basic_number, N); hipMemcpy(B+basic_number*leave_index, deviceVector, sizeof(double) * basic_number, hipMemcpyDeviceToHost); //fprintf(logfile, "== new B ==\n"); //for(int row = 0 ; row < basic_number ; row++){ // for(int col = 0 ; col < basic_number ; col++){ // fprintf(logfile, "%9.9f\t", B[col*basic_number + row]); // } // fprintf(logfile, "\n"); //} //fprintf(logfile, "=== basic variable ===\n"); //for(int col = 0 ; col < basic_number ; col++){ // fprintf(logfile, "%d\t", basic_index[col]); //} //fprintf(logfile, "\n"); //fprintf(logfile, "=== non-basic variable ===\n"); //for(int col = 0 ; col < non_basic_number ; col++){ // fprintf(logfile, "%d\t", non_basic_index[col]); //} //fprintf(logfile, "\n"); } it++; } if(infeasible == true){ value = 0.0; //plhs[0] = mxCreateDoubleScalar(value); } else{ if (unbound == false){ fprintf(logfile, "=== optimal solution ===\n"); for(int i = 0; i < basic_number ; i++) { if(basic_index[i] < N){ x[basic_index[i]] = x_B[i]; } } hipMemcpy(deviceVectorN, x, sizeof(double) 
* N, hipMemcpyHostToDevice); hipblasDdot (handle, N, deviceVectorN, 1, deviceC, 1, &value); plhs[0] = mxCreateDoubleScalar(value); } else{ fprintf(logfile, "=== unbounded solution ===\n"); value = 0.0; //plhs[0] = mxCreateDoubleScalar(value); } } //toc auto finish = std::chrono::high_resolution_clock::now(); std::chrono::duration<double> elapsed = finish - start; fprintf(logfile, "Runtime = %lf \n", elapsed.count()); fprintf(logfile, "minimum value = %9.9f\n", value); fprintf(logfile, "iteration = %d\n", it); hipblasDestroy(handle); hipsolverDnDestroy(cusolverH); hipStreamDestroy(stream); if(phaseI == true)hipFree(art); hipFree(devInfo); hipFree(d_work); hipFree(deviceIpiv); hipFree(deviceMatrix); hipFree(deviceVector); hipFree(deviceVectorN); hipFree(deviceC); hipFree(deviceI); hipFree(deviceAineq); hipFree(deviceAeq); hipFree(deviceA); hipFree(c_basic); hipDeviceReset(); hipProfilerStop(); if(VERBOSE) fprintf(logfile, "== RSLP finished ==\n"); fclose(logfile); }
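// --- Editor's note (not part of the original HIP/CUDA pair) ---------------------------------
// The revised-simplex MEX routine above re-solves B * x = b on every iteration through its
// LUfactorization()/SolveLinear() wrappers: query the getrf workspace once, factorize the basis
// matrix in place, then back-substitute. The standalone sketch below shows that call sequence
// with the CUDA-side names (matching the paired .cu source that follows) on an assumed 2x2
// system; the values and variable names are illustrative only.
// Build with: nvcc example.cu -lcusolver -lcudart
#include <cuda_runtime.h>
#include <cusolverDn.h>
#include <cstdio>

int main() {
    // Solve B*x = b for B = [[3,1],[1,2]] (stored column-major), b = [9,8]; expected x = [2,3].
    const int n = 2, lda = 2, ldb = 2;
    const double hB[4] = { 3.0, 1.0, 1.0, 2.0 };
    const double hb[2] = { 9.0, 8.0 };

    double *dB = nullptr, *db = nullptr, *dWork = nullptr;
    int *dIpiv = nullptr, *dInfo = nullptr;
    cudaMalloc(&dB, sizeof(hB));
    cudaMalloc(&db, sizeof(hb));
    cudaMalloc(&dIpiv, n * sizeof(int));
    cudaMalloc(&dInfo, sizeof(int));              // devInfo is a single int on the device
    cudaMemcpy(dB, hB, sizeof(hB), cudaMemcpyHostToDevice);
    cudaMemcpy(db, hb, sizeof(hb), cudaMemcpyHostToDevice);

    cusolverDnHandle_t handle = nullptr;
    cusolverDnCreate(&handle);

    int lwork = 0;
    cusolverDnDgetrf_bufferSize(handle, n, n, dB, lda, &lwork);    // 1) workspace query
    cudaMalloc(&dWork, lwork * sizeof(double));

    cusolverDnDgetrf(handle, n, n, dB, lda, dWork, dIpiv, dInfo);  // 2) LU factorization in place
    cusolverDnDgetrs(handle, CUBLAS_OP_N, n, 1, dB, lda, dIpiv,
                     db, ldb, dInfo);                              // 3) solve; x overwrites b

    double hx[2] = { 0.0, 0.0 };
    cudaMemcpy(hx, db, sizeof(hx), cudaMemcpyDeviceToHost);
    printf("x = [%f, %f]\n", hx[0], hx[1]);                        // expected [2.0, 3.0]

    cusolverDnDestroy(handle);
    cudaFree(dB); cudaFree(db); cudaFree(dWork); cudaFree(dIpiv); cudaFree(dInfo);
    return 0;
}
// --------------------------------------------------------------------------------------------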
5088047fe96f0841d8861db36098439ca92b2cf9.cu
#include "mex.h" #include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> #include <cublas_v2.h> #include <cusolverDn.h> #include <chrono> #include <cuda_profiler_api.h> //mexcuda RSLP.cu -lcublas -lcusolver -lcudart //nvprof --profile-from-start off "C:\Program Files\MATLAB\R2018b\bin\win64\MATLAB.exe" -r RSLPtest #define VERBOSE 1 #define LOG_ERROR 1 FILE *logfile; void LogCudaError(const char* title, cudaError_t ret){ if(!LOG_ERROR) return; fprintf(logfile, "%s = %s\n", title, cudaGetErrorName(ret)); } void LogCudaSolverError(const char* title, cusolverStatus_t ret){ const char *errorname; if(!LOG_ERROR) return; switch(ret){ case CUSOLVER_STATUS_SUCCESS: errorname = "CUSOLVER_STATUS_SUCCESS"; break; case CUSOLVER_STATUS_NOT_INITIALIZED: errorname = "CUSOLVER_STATUS_NOT_INITIALIZED"; break; case CUSOLVER_STATUS_ALLOC_FAILED: errorname = "CUSOLVER_STATUS_ALLOC_FAILED"; break; case CUSOLVER_STATUS_INVALID_VALUE: errorname = "CUSOLVER_STATUS_INVALID_VALUE"; break; case CUSOLVER_STATUS_ARCH_MISMATCH: errorname = "CUSOLVER_STATUS_ARCH_MISMATCH"; break; case CUSOLVER_STATUS_MAPPING_ERROR: errorname = "CUSOLVER_STATUS_MAPPING_ERROR"; break; case CUSOLVER_STATUS_EXECUTION_FAILED: errorname = "CUSOLVER_STATUS_EXECUTION_FAILED"; break; case CUSOLVER_STATUS_INTERNAL_ERROR: errorname = "CUSOLVER_STATUS_INTERNAL_ERROR"; break; case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED: errorname = "CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED"; break; case CUSOLVER_STATUS_NOT_SUPPORTED: errorname = "CUSOLVER_STATUS_NOT_SUPPORTED"; break; case CUSOLVER_STATUS_ZERO_PIVOT: errorname = "CUSOLVER_STATUS_ZERO_PIVOT"; break; case CUSOLVER_STATUS_INVALID_LICENSE: errorname = "CUSOLVER_STATUS_INVALID_LICENSE"; break; default: errorname = "Unknow error"; } fprintf(logfile, "%s = %s\n", title, errorname); } __global__ void initZeroVector(double *deviceVector, int M) { int i = blockDim.x*blockIdx.x + threadIdx.x; if(i < M) { deviceVector[i] = 0; } } __global__ void initdeviceA(double *deviceA, double * deviceAineq, double * deviceAeq, int Mineq, int Meq, int N){ int k = blockDim.x*blockIdx.x + threadIdx.x; if ( k < Mineq + Meq){ if ( k < Mineq){ for (int i = 0 ; i < N ; i++){ deviceA[ i * (Mineq + Meq) + k] = deviceAineq[i * Mineq + k]; } } else if ( k >= Mineq && k < Mineq + Meq){ for (int i = 0 ; i < N ; i++){ deviceA[ i * (Mineq + Meq) + k] = deviceAineq[i * Meq + k - Mineq]; } } } } __global__ void initIdentityGPU(double *deviceI, int Mineq, int Meq) { int i = blockDim.x*blockIdx.x + threadIdx.x; if(i < Mineq + Meq) { for (int a = 0 ; a < Mineq ; a++){ if(i == a) deviceI[a * Mineq + Meq + i ] = 1.0; else deviceI[a * Mineq + Meq + i ] = 0.0; } } } __global__ void CopyCol(int i, double* deviceVector, double* deviceA, double* deviceI, int basic_number, int N){ int a = blockDim.x*blockIdx.x + threadIdx.x; if( a < basic_number){ if(i < N){ deviceVector[a] = deviceA[basic_number*i+a]; } else{ deviceVector[a] = deviceI[basic_number*(i-N) + a]; } } } __global__ void initart(double *art, int M){ int a = blockDim.x*blockIdx.x + threadIdx.x; if(a < M){ art[a] = -1.0; } } void *LUfactorization( double *deviceA, int M, int *deviceIpiv, double * d_work, int *devInfo, cusolverDnHandle_t cusolverH, const int ldA ){ //LU factorization cusolverDnDgetrf(cusolverH, M, M, deviceA, ldA, d_work, deviceIpiv, devInfo); cudaDeviceSynchronize(); return 0; } double *SolveLinear(double *deviceA, double *deviceB, int M, int *deviceIpiv, double *d_work, int *devInfo, cusolverDnHandle_t cusolverH, const int ldA, const int ldB, double *output) { 
//solve cusolverDnDgetrs(cusolverH, CUBLAS_OP_N, M, 1, deviceA, ldA, deviceIpiv, deviceB, ldB, devInfo); cudaDeviceSynchronize(); cudaMemcpy(output, deviceB, sizeof(double) * M, cudaMemcpyDeviceToHost); return 0; } void mexFunction( int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[] ){ double *deviceC, *deviceA, *deviceMatrix, *deviceVector, *deviceVectorN, *deviceI, *deviceAeq, *deviceAineq; int *deviceIpiv; double *Aineq, *bineq, *c, *x, *Aeq, *beq; int N, Mineq, Meq; int it = 0 ; double* one = new double [1] ; one[0] = 1.0; double* zero = new double [1]; zero[0] = 0; double value = 0 ; double *d_work = NULL ; int *devInfo = NULL; int lwork; char const * const errId = "parallel:gpu:CuSOLVER:InvalidInput"; char const * const errMsg = "Invalid Input to MEX file."; if( (nrhs != 5) || !mxIsDouble(prhs[0]) || mxGetM(prhs[1]) != mxGetM(prhs[2]) || mxGetM(prhs[3]) != mxGetM(prhs[4])){ mexErrMsgIdAndTxt(errId, errMsg); } logfile = fopen("rslplog.txt","w"); if(VERBOSE) fprintf(logfile, "== RSLP start ==\n"); cudaProfilerStart(); //receive data from inputs c =(double *)mxGetData(prhs[0]); Aineq =(double *)mxGetData(prhs[1]); bineq =(double *)mxGetData(prhs[2]); Aeq = (double *)mxGetData(prhs[3]); beq = (double *)mxGetData(prhs[4]); //sizes N = (int)mxGetN(prhs[0]); //column of A (number of variables) Mineq = (int)mxGetM(prhs[1]); //row of A (number of unequalities) Meq = (int)mxGetM(prhs[3]); //row of Aeq (number of equalities) int basic_number = Mineq + Meq; int non_basic_number = N - Meq; fprintf(logfile, "basic number = %d + %d = %d\n", Mineq, Meq, basic_number); fprintf(logfile, "non basic number = %d - %d = %d\n", N, Meq, non_basic_number); const int ldA = basic_number; const int ldB = basic_number; int threadsPerBlock = 512 ; int blocksPerGrid = (basic_number + threadsPerBlock -1)/ threadsPerBlock; double alpha = 1.0; double beta = 0; //Malloc cudaMalloc(&deviceMatrix, sizeof(double) * (basic_number) * (basic_number)); cudaMalloc(&deviceVector, sizeof(double) * basic_number ); cudaMalloc(&deviceVectorN, sizeof(double) * N ); cudaMalloc(&deviceC, sizeof(double) * (N+Mineq)); cudaMalloc(&deviceI, sizeof(double) * basic_number * Mineq); cudaMalloc(&deviceAineq, sizeof(double) * Mineq * N); cudaMalloc(&deviceAeq, sizeof(double) * Meq * N); cudaMalloc(&deviceA, sizeof(double) * basic_number * N); //Memcpy A cudaMemcpy(deviceAineq, Aineq, sizeof(double) * Mineq * N, cudaMemcpyHostToDevice); cudaMemcpy(deviceAeq, Aeq, sizeof(double) * Meq * N, cudaMemcpyHostToDevice); initdeviceA<<<blocksPerGrid, threadsPerBlock>>>(deviceA, deviceAineq, deviceAeq, Mineq, Meq, N); double *_A = new double[basic_number * N]; cudaMemcpy(_A, deviceA, sizeof(double) * basic_number * N, cudaMemcpyDeviceToHost); fprintf(logfile, "== A ==\n"); for(int row = 0 ; row < basic_number ; row++){ for(int col = 0 ; col < N ; col++){ fprintf(logfile, "%9.9f\t", _A[col*basic_number + row]); } fprintf(logfile, "\n"); } //Memcpy C initZeroVector<<<blocksPerGrid, threadsPerBlock>>>(deviceC, (Mineq+N)); cudaMemcpy(deviceC, c, sizeof(double) * N, cudaMemcpyHostToDevice); //init Identity initIdentityGPU<<<blocksPerGrid, threadsPerBlock>>>(deviceI, Mineq, Meq); double *I = new double[basic_number * Mineq]; cudaMemcpy(I, deviceI, sizeof(double) * basic_number * Mineq, cudaMemcpyDeviceToHost); fprintf(logfile, "== I ==\n"); for(int row = 0 ; row < basic_number ; row++){ for(int col = 0 ; col < Mineq ; col++){ fprintf(logfile, "%9.9f\t", I[col*basic_number + row]); } fprintf(logfile, "\n"); } //create outputs plhs[0] = 
mxCreateDoubleScalar(value); plhs[1] = mxCreateNumericMatrix(N, 1, mxDOUBLE_CLASS, mxREAL); x = ( double *)mxGetData(plhs[1]); //create DeviceInformation and Permution Identity Vector cudaMalloc(&devInfo, sizeof(double)); cudaMalloc(&deviceIpiv, sizeof(int) * basic_number); //setting Handle variable cublasHandle_t handle; cudaStream_t stream = NULL; cusolverDnHandle_t cusolverH = NULL ; cusolverDnCreate(&cusolverH); cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking); cusolverDnSetStream(cusolverH, stream); cublasCreate(&handle); //get local work size cusolverDnDgetrf_bufferSize(cusolverH, basic_number, basic_number, deviceMatrix, ldA, &lwork); cudaMalloc((void**)&d_work, sizeof(double)*lwork); //tic auto start = std::chrono::high_resolution_clock::now(); //create all variables for RSLP //define basic_index int *basic_index = new int[basic_number]; for(int i = 0 ; i < basic_number ; i++){ basic_index[i] = N - Meq + i; } //fprintf(logfile, "=== basic_index ===\n"); //for(int col = 0 ; col < basic_number ; col++){ // fprintf(logfile, "%d\t", basic_index[col]); //} //fprintf(logfile, "\n"); //define non_basic_index int *non_basic_index = new int[non_basic_number + 1]; for(int i = 0; i < non_basic_number + 1 ; i++){ non_basic_index[i] = i; } //fprintf(logfile, "=== non_basic_index ===\n"); //for(int col = 0 ; col < non_basic_number ; col++){ // fprintf(logfile, "%d\t", non_basic_index[col]); //} //fprintf(logfile, "\n"); //define B double *B = new double[basic_number * basic_number]; //define c_basic double* c_basic = new double[basic_number]; cudaMalloc(&c_basic, sizeof(double) * basic_number ); for (int i = 0 ; i < basic_number ; i++){ CopyCol<<<blocksPerGrid, threadsPerBlock>>>( basic_index[i], deviceVector, deviceA, deviceI, basic_number, N); cudaMemcpy(B + i * basic_number , deviceVector, sizeof(double) * basic_number, cudaMemcpyDeviceToHost); cudaMemcpy(c_basic + i , deviceC + basic_index[i], sizeof(double), cudaMemcpyDeviceToDevice); } //fprintf(logfile, "== B ==\n"); //for(int row = 0 ; row < basic_number ; row++){ // for(int col = 0 ; col < basic_number ; col++){ // fprintf(logfile, "%9.9f\t", B[col*basic_number + row]); // } // fprintf(logfile, "\n"); //} //define x_B double *x_B = new double[basic_number]; //define P and min_P double *P = new double[basic_number]; double *min_P = new double[basic_number]; //define c_d double c_d = 0 ; double min_c_d ; //define unbound and infeasible condition bool unbound = false; bool infeasible = false; int leave_index, enter_index; //check initial basic solution bool phaseI = false ; cudaMemcpy(deviceMatrix, B, sizeof(double) * basic_number * basic_number, cudaMemcpyHostToDevice); cudaMemcpy(deviceVector, bineq, sizeof(double) * Mineq , cudaMemcpyHostToDevice); cudaMemcpy(deviceVector + Mineq, beq, sizeof(double) * Meq , cudaMemcpyHostToDevice); LUfactorization(deviceMatrix, basic_number, deviceIpiv, d_work, devInfo, cusolverH, ldA); //calculate x_B = B/b SolveLinear(deviceMatrix, deviceVector, basic_number, deviceIpiv, d_work, devInfo, cusolverH, ldA, ldB, x_B); double min_x_B = 999999999999999999999999999999999.0; for ( int i = 0 ; i < basic_number ; i++){ if ( x_B[i] < min_x_B ){ min_x_B = x_B[i]; leave_index = i; //fprintf(logfile, "min ratio_%d = %9.9f\n", i, ratio); } } //fprintf(logfile, "== x_B ==\n"); //for(int row = 0 ; row < basic_number ; row++){ // fprintf(logfile, "%9.9f\n", x_B[row]); //} //fprintf(logfile, "\n"); //artificial column double* art = new double[basic_number]; if( min_x_B < 0){ phaseI = true; fprintf(logfile, "== 
phase I ==\n"); int a = basic_index[leave_index]; fprintf(logfile, "a = %d\n", a); //fprintf(logfile, "a = %d\n", a); basic_index[leave_index] = int(basic_number + non_basic_number + 1); for (int i = 0 ; i < basic_number ; i++ ){ if (basic_index[i] == basic_number + non_basic_number + 1){ cudaMemcpy(c_basic + i, one, sizeof(double), cudaMemcpyHostToDevice); } else{ cudaMemcpy(c_basic + i, zero, sizeof(double), cudaMemcpyHostToDevice); } } non_basic_index[non_basic_number] = a; cudaMalloc(&art, sizeof(double) * basic_number); initart<<<blocksPerGrid, threadsPerBlock>>>(art, basic_number); cudaMemcpy(deviceMatrix, B, sizeof(double) * basic_number * basic_number, cudaMemcpyHostToDevice); cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, basic_number, 1, basic_number, &alpha, deviceMatrix, basic_number, art, basic_number, &beta, deviceVector, basic_number); cudaMemcpy(art , deviceVector, sizeof(double) *basic_number, cudaMemcpyDeviceToDevice); //double* host_art = new double[basic_number]; //cudaMemcpy(host_art , art , sizeof(double) *basic_number, cudaMemcpyDeviceToHost); // //fprintf(logfile, "== host_art ==\n"); //for (int i = 0; i < basic_number ;i++){ // fprintf(logfile, "%9.9f\n", host_art[i]); //} //fprintf(logfile, "\n"); cudaMemcpy(B+basic_number*leave_index, art, sizeof(double) * basic_number, cudaMemcpyDeviceToHost); //fprintf(logfile, "== basic variable ==\n"); //for (int i = 0; i < basic_number ;i++){ // fprintf(logfile, "%d\t", basic_index[i]); //} //fprintf(logfile, "\n"); //fprintf(logfile, "== non-basic variable ==\n"); //for (int i = 0; i <non_basic_number + 1 ;i++){ // fprintf(logfile, "%d\t", non_basic_index[i]); //} //fprintf(logfile, "\n"); //double* host_c_basic = new double[basic_number]; //cudaMemcpy(host_c_basic, c_basic, sizeof(double) * basic_number, cudaMemcpyDeviceToHost); //fprintf(logfile, "== c_basic ==\n"); //for(int row = 0 ; row < basic_number ; row++){ // fprintf(logfile, "%9.9f\t", host_c_basic[row]); //} //fprintf(logfile, "\n"); } while (phaseI == true){ fprintf(logfile, "== iteration %d ==\n",it); cudaMemcpy(deviceMatrix, B, sizeof(double) * basic_number * basic_number, cudaMemcpyHostToDevice); cudaMemcpy(deviceVector, bineq, sizeof(double) * Mineq , cudaMemcpyHostToDevice); cudaMemcpy(deviceVector + Mineq, beq, sizeof(double) * Meq , cudaMemcpyHostToDevice); LUfactorization(deviceMatrix, basic_number, deviceIpiv, d_work, devInfo, cusolverH, ldA); SolveLinear(deviceMatrix, deviceVector, basic_number, deviceIpiv, d_work, devInfo, cusolverH, ldA, ldB, x_B); fprintf(logfile, "== x_B ==\n"); for(int row = 0 ; row < basic_number ; row++){ fprintf(logfile, "%9.9f\n", x_B[+ row]); } fprintf(logfile, "\n"); min_c_d = 9999999999999999999999999999999999999999999999.0 ; //cudaMemcpy(host_c_basic, c_basic, sizeof(double) * M, cudaMemcpyDeviceToHost); //fprintf(logfile, "== c_basic ==\n"); //for(int row = 0 ; row < M ; row++){ // for(int col = 0 ; col < 1 ; col++){ // fprintf(logfile, "%9.9f\t", host_c_basic[col*M + row]); // } // fprintf(logfile, "\n"); //} //find entering index for(int i = 0 ; i < non_basic_number+1 ; i++){ // === calculate P === if ( non_basic_index[i] == non_basic_number+basic_number+1){ cudaMemcpy(deviceVector, art, sizeof(double) * basic_number, cudaMemcpyDeviceToDevice); SolveLinear(deviceMatrix, deviceVector, basic_number, deviceIpiv, d_work, devInfo, cusolverH, ldA, ldB, P); //calculate c_d(i) cublasDdot (handle, basic_number, deviceVector, 1, c_basic, 1, &value); c_d = 1.0 - value ; fprintf(logfile, "c_d_%d = %4.4f - %4.4f = %4.4f \n", i, 1.0, 
value, c_d); } else{ CopyCol<<<blocksPerGrid, threadsPerBlock>>>(non_basic_index[i], deviceVector, deviceA, deviceI, basic_number, N); SolveLinear(deviceMatrix, deviceVector, basic_number, deviceIpiv, d_work, devInfo, cusolverH, ldA, ldB, P); //calculate c_d(i) cublasDdot (handle, basic_number, deviceVector, 1, c_basic, 1, &value); fprintf(logfile, "c_basic*P = %4.4f \n", value); c_d = 0.0 - value ; fprintf(logfile, "c_d_%d = %4.4f - %4.4f = %4.4f \n", i, 0.0, value, c_d); } fprintf(logfile, "== P ==\n"); for(int row = 0 ; row < basic_number ; row++){ for(int col = 0 ; col < 1 ; col++){ fprintf(logfile, "%9.9f\t", P[col*basic_number + row]); } fprintf(logfile, "\n"); } if (c_d <= min_c_d){ min_c_d = c_d; cudaMemcpy(min_P, P, sizeof(double) * basic_number, cudaMemcpyHostToHost); enter_index = i; } } //fprintf(logfile, "min_c_d = %9.9f\n", min_c_d); //fprintf(logfile, "== min_P ==\n"); //for(int row = 0 ; row < basic_index ; row++){ // for(int col = 0 ; col < 1 ; col++){ // fprintf(logfile, "%9.9f\t", min_P[col*basic_index + row]); // } // fprintf(logfile, "\n"); //} if ( min_c_d >= 0 ){ it++; //fprintf(logfile, "=== basic variable ===\n"); //for(int col = 0 ; col < basic_number ; col++){ // fprintf(logfile, "%d\t", basic_index[col]); //} //fprintf(logfile, "\n"); //fprintf(logfile, "=== non-basic variable ===\n"); //for(int col = 0 ; col < non_basic_number+1 ; col++){ // fprintf(logfile, "%d\t", non_basic_index[col]); //} //fprintf(logfile, "\n"); break; } else { unbound = true; for(int i = 0; i < basic_number ; i++){ if (min_P[i] > 0){ unbound = false; fprintf(logfile, "min_P[%d] > 0\n", i); break; } } if(unbound == true){ break; } double m = 999999999999999999999999999999999.0; int repeat = 0; leave_index = -1; for ( int i = 0 ; i < basic_number ; i++){ if(min_P[i] > 0){ double ratio = x_B[i] / min_P[i]; if ( ratio < m && ratio >= 0){ m = ratio; leave_index = i; repeat = 0; fprintf(logfile, "min ratio_%d = %9.9f\n", i, ratio); } else if (ratio == m){ fprintf(logfile, "ratio_%d = %9.9f, m = %9.9f\n", i, ratio, m); repeat++; } } } if (leave_index == -1){ unbound = true; fprintf(logfile, "leave_index = -1\n"); } if (repeat != 0){ unbound = true; fprintf(logfile, "leave_index is repeated\n"); } if (unbound == true){ break; } //fprintf(logfile, "enter_index = %d\n", non_basic_index[enter_index]); //fprintf(logfile, "leave_index = %d\n", basic_index[leave_index]); //update basic feasible solution int a = basic_index[leave_index]; basic_index[leave_index] = non_basic_index[enter_index]; if (non_basic_index[enter_index] == non_basic_number+basic_number+1){ cudaMemcpy(c_basic+leave_index, one, sizeof(double) * 1, cudaMemcpyHostToDevice); } else{ cudaMemcpy(c_basic+leave_index, zero, sizeof(double) * 1, cudaMemcpyHostToDevice); } non_basic_index[enter_index] = a; if (basic_index[leave_index] == non_basic_number+basic_number+1){ cudaMemcpy(B+basic_number*leave_index, art, sizeof(double) * basic_number, cudaMemcpyDeviceToHost); } else{ CopyCol<<<blocksPerGrid, threadsPerBlock>>>(basic_index[leave_index], deviceVector, deviceA, deviceI, basic_number, N); cudaMemcpy(B+basic_number*leave_index, deviceVector, sizeof(double) * basic_number, cudaMemcpyDeviceToHost); } //fprintf(logfile, "== new B ==\n"); //for(int row = 0 ; row < M ; row++){ // for(int col = 0 ; col < M ; col++){ // fprintf(logfile, "%9.9f\t", B[col*M + row]); // } // fprintf(logfile, "\n"); //} //fprintf(logfile, "=== basic variable ===\n"); //for(int col = 0 ; col < M ; col++){ // fprintf(logfile, "%d\t", basic_index[col]); //} 
//fprintf(logfile, "\n"); //fprintf(logfile, "=== non-basic variable ===\n"); //for(int col = 0 ; col < N+1 ; col++){ // fprintf(logfile, "%d\t", non_basic_index[col]); //} //fprintf(logfile, "\n"); } it++; } min_x_B = 999999999999999999999999999999999.0; for ( int i = 0 ; i < basic_number ; i++){ if ( x_B[i] < min_x_B ){ min_x_B = x_B[i]; //fprintf(logfile, "min ratio_%d = %9.9f\n", i, ratio); } if(basic_index[i] == basic_number + non_basic_number + 1){ infeasible = true; } } if( min_x_B < 0 && phaseI == true){ infeasible = true; } if (infeasible == true){ fprintf(logfile, "=== infeasible solution ===\n"); } else{ infeasible = false; fprintf(logfile, "=== phase II ===\n"); if (phaseI == true){ for (int i = 0; i < basic_number ; i++){ cudaMemcpy(c_basic + i, deviceC + basic_index[i], sizeof(double), cudaMemcpyDeviceToDevice); } for(int i = 0; i < non_basic_number+1; i++){ if ( non_basic_index[i] == non_basic_number+basic_number+1 ){ for (int j = i+1 ; j < non_basic_number+1 ; j++){ non_basic_index[j-1] = non_basic_index[j]; } break; } } } } //fprintf(logfile, "== new B ==\n"); // for(int row = 0 ; row < basic_number ; row++){ // for(int col = 0 ; col < basic_number ; col++){ // fprintf(logfile, "%9.9f\t", B[col*basic_number + row]); // } // fprintf(logfile, "\n"); // } //fprintf(logfile, "== basic_index ==\n"); //for(int col = 0 ; col < basic_number ; col++){ // fprintf(logfile, "%d\t", basic_index[col]); //} //fprintf(logfile, "\n"); // //fprintf(logfile, "== non_basic_index ==\n"); //for(int col = 0 ; col < non_basic_number+1 ; col++){ // fprintf(logfile, "%d\t", non_basic_index[col]); //} //fprintf(logfile, "\n"); while(infeasible == false){ fprintf(logfile, "== iteration %d ==\n",it); cudaMemcpy(deviceMatrix, B, sizeof(double) * basic_number * basic_number, cudaMemcpyHostToDevice); cudaMemcpy(deviceVector, bineq, sizeof(double) * Mineq , cudaMemcpyHostToDevice); cudaMemcpy(deviceVector + Mineq, beq, sizeof(double) * Meq , cudaMemcpyHostToDevice); LUfactorization(deviceMatrix, basic_number, deviceIpiv, d_work, devInfo, cusolverH, ldA); SolveLinear(deviceMatrix, deviceVector, basic_number, deviceIpiv, d_work, devInfo, cusolverH, ldA, ldB, x_B); fprintf(logfile, "== x_B ==\n"); for(int row = 0 ; row < basic_number ; row++){ fprintf(logfile, "%9.9f\t", x_B[row]); } min_c_d = 9999999999999999999999999999999999999999999999.0 ; //double *host_c_basic = new double[basic_number]; //cudaMemcpy(host_c_basic, c_basic, sizeof(double) * basic_number, cudaMemcpyDeviceToHost); // //fprintf(logfile, "== c_basic ==\n"); //for(int row = 0 ; row < basic_number ; row++){ // fprintf(logfile, "%9.9f\n", host_c_basic[row]); //} //CopyCol<<<blocksPerGrid, threadsPerBlock>>>(non_basic_index[0], deviceVector, deviceA, deviceI, basic_number, N); //cudaMemcpy(host_c_basic, deviceVector, sizeof(double) * basic_number, cudaMemcpyDeviceToHost); // //fprintf(logfile, "== cpyCol ==\n"); //for(int row = 0 ; row < basic_number ; row++){ // fprintf(logfile, "%9.9f\n", host_c_basic[row]); //} //find entering index for(int i = 0 ; i < non_basic_number ; i++){ // === calculate P === CopyCol<<<blocksPerGrid, threadsPerBlock>>>(non_basic_index[i], deviceVector, deviceA, deviceI, basic_number, N); SolveLinear(deviceMatrix, deviceVector, basic_number, deviceIpiv, d_work, devInfo, cusolverH, ldA, ldB, P); //calculate c_d(i) cublasDdot (handle, basic_number, deviceVector, 1, c_basic, 1, &value); c_d = c[non_basic_index[i]] - value ; //fprintf(logfile, "c_d_%d = %4.4f - %4.4f = %4.4f \n", i, c[non_basic_index[i]], value, c_d); if 
(c_d <= min_c_d){ min_c_d = c_d; cudaMemcpy(min_P, P, sizeof(double) * basic_number, cudaMemcpyHostToHost); enter_index = i; //fprintf(logfile, "c_d_%d = %4.4f - %4.4f = %4.4f \n", i, c[non_basic_index[i]], value, c_d); } } fprintf(logfile, "min_c_d = %9.9f\n", min_c_d); fprintf(logfile, "== min_P ==\n"); for(int row = 0 ; row < basic_number ; row++){ for(int col = 0 ; col < 1 ; col++){ fprintf(logfile, "%9.9f\t", min_P[col*basic_number + row]); } fprintf(logfile, "\n"); } if ( min_c_d >= 0 ){ it++; //fprintf(logfile, "=== basic variable ===\n"); //for(int col = 0 ; col < basic_number ; col++){ // fprintf(logfile, "%d\t", basic_index[col]); //} //fprintf(logfile, "\n"); //fprintf(logfile, "=== non-basic variable ===\n"); //for(int col = 0 ; col < non_basic_number ; col++){ // fprintf(logfile, "%d\t", non_basic_index[col]); //} //fprintf(logfile, "\n"); break; } else { unbound = true; for(int i = 0; i < basic_number ; i++){ if (min_P[i] > 0){ unbound = false; fprintf(logfile, "min_P[%d] > 0\n", i); break; } } if(unbound == true){ break; } double m = 999999999999999999999999999999999.0; int repeat = 0; leave_index = -1; for ( int i = 0 ; i < basic_number ; i++){ if(min_P[i] > 0){ double ratio = x_B[i] / min_P[i]; if ( ratio < m && ratio >= 0){ m = ratio; leave_index = i; repeat = 0; fprintf(logfile, "min ratio_%d = %9.9f / %9.9f = %9.9f\n", i, x_B[i], min_P[i], ratio); } else if (ratio == m){ fprintf(logfile, "ratio_%d = %9.9f, m = %9.9f\n", i, ratio, m); repeat++; } } } if (leave_index == -1){ unbound = true; fprintf(logfile, "leave_index = -1\n"); } if (repeat != 0){ unbound = true; fprintf(logfile, "leave_index is repeated\n"); } if (unbound == true){ break; } fprintf(logfile, "enter_index = %d\n", non_basic_index[enter_index]); fprintf(logfile, "leave_index = %d\n", basic_index[leave_index]); //update basic feasible solution int a = basic_index[leave_index]; basic_index[leave_index] = non_basic_index[enter_index]; cudaMemcpy(c_basic+leave_index, deviceC + non_basic_index[enter_index], sizeof(double) * 1, cudaMemcpyHostToDevice); non_basic_index[enter_index] = a; CopyCol<<<blocksPerGrid, threadsPerBlock>>>(basic_index[leave_index], deviceVector, deviceA, deviceI, basic_number, N); cudaMemcpy(B+basic_number*leave_index, deviceVector, sizeof(double) * basic_number, cudaMemcpyDeviceToHost); //fprintf(logfile, "== new B ==\n"); //for(int row = 0 ; row < basic_number ; row++){ // for(int col = 0 ; col < basic_number ; col++){ // fprintf(logfile, "%9.9f\t", B[col*basic_number + row]); // } // fprintf(logfile, "\n"); //} //fprintf(logfile, "=== basic variable ===\n"); //for(int col = 0 ; col < basic_number ; col++){ // fprintf(logfile, "%d\t", basic_index[col]); //} //fprintf(logfile, "\n"); //fprintf(logfile, "=== non-basic variable ===\n"); //for(int col = 0 ; col < non_basic_number ; col++){ // fprintf(logfile, "%d\t", non_basic_index[col]); //} //fprintf(logfile, "\n"); } it++; } if(infeasible == true){ value = 0.0; //plhs[0] = mxCreateDoubleScalar(value); } else{ if (unbound == false){ fprintf(logfile, "=== optimal solution ===\n"); for(int i = 0; i < basic_number ; i++) { if(basic_index[i] < N){ x[basic_index[i]] = x_B[i]; } } cudaMemcpy(deviceVectorN, x, sizeof(double) * N, cudaMemcpyHostToDevice); cublasDdot (handle, N, deviceVectorN, 1, deviceC, 1, &value); plhs[0] = mxCreateDoubleScalar(value); } else{ fprintf(logfile, "=== unbounded solution ===\n"); value = 0.0; //plhs[0] = mxCreateDoubleScalar(value); } } //toc auto finish = std::chrono::high_resolution_clock::now(); 
    std::chrono::duration<double> elapsed = finish - start;
    fprintf(logfile, "Runtime = %lf \n", elapsed.count());
    fprintf(logfile, "minimum value = %9.9f\n", value);
    fprintf(logfile, "iteration = %d\n", it);

    cublasDestroy(handle);
    cusolverDnDestroy(cusolverH);
    cudaStreamDestroy(stream);

    if(phaseI == true) cudaFree(art);
    cudaFree(devInfo);
    cudaFree(d_work);
    cudaFree(deviceIpiv);
    cudaFree(deviceMatrix);
    cudaFree(deviceVector);
    cudaFree(deviceVectorN);
    cudaFree(deviceC);
    cudaFree(deviceI);
    cudaFree(deviceAineq);
    cudaFree(deviceAeq);
    cudaFree(deviceA);
    cudaFree(c_basic);

    cudaDeviceReset();
    cudaProfilerStop();

    if(VERBOSE) fprintf(logfile, "== RSLP finished ==\n");
    fclose(logfile);
}
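// Illustrative sketch: the MEX routine above repeatedly factors the basis
// matrix with cusolverDnDgetrf and then back-solves each right-hand side with
// cusolverDnDgetrs. The standalone helper below shows that factor-then-solve
// pattern in isolation; the function name luSolveSketch, the arguments hostA,
// hostB and n, and the omission of error checking are assumptions for the
// example, not part of the solver above.
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cusolverDn.h>

static void luSolveSketch(const double *hostA, double *hostB, int n)
{
    cusolverDnHandle_t h;
    cusolverDnCreate(&h);

    double *dA, *dB, *dWork;
    int *dPiv, *dInfo;
    int lwork = 0;

    cudaMalloc(&dA, sizeof(double) * n * n);
    cudaMalloc(&dB, sizeof(double) * n);
    cudaMalloc(&dPiv, sizeof(int) * n);
    cudaMalloc(&dInfo, sizeof(int));
    cudaMemcpy(dA, hostA, sizeof(double) * n * n, cudaMemcpyHostToDevice);
    cudaMemcpy(dB, hostB, sizeof(double) * n, cudaMemcpyHostToDevice);

    // workspace query, then in-place LU factorization P*A = L*U
    cusolverDnDgetrf_bufferSize(h, n, n, dA, n, &lwork);
    cudaMalloc(&dWork, sizeof(double) * lwork);
    cusolverDnDgetrf(h, n, n, dA, n, dWork, dPiv, dInfo);

    // back-substitution: overwrites dB with the solution of A*x = b
    cusolverDnDgetrs(h, CUBLAS_OP_N, n, 1, dA, n, dPiv, dB, n, dInfo);
    cudaMemcpy(hostB, dB, sizeof(double) * n, cudaMemcpyDeviceToHost);

    cudaFree(dA); cudaFree(dB); cudaFree(dWork); cudaFree(dPiv); cudaFree(dInfo);
    cusolverDnDestroy(h);
}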
f64ced104d6414adf9d64f1243e6c66e52190cc6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <glog/logging.h> #include "paddle/fluid/inference/tensorrt/plugin/elementwise_op_plugin.h" namespace paddle { namespace inference { namespace tensorrt { namespace plugin { namespace details { template <typename T> struct Add { __device__ T operator()(const T &a, const T &b) const { return a + b; } }; template <typename T> struct Mul { __device__ T operator()(const T &a, const T &b) const { return a * b; } }; } // namespace details template <typename T, typename Operator> __global__ void elementwise_kernel(const size_t total, const T *x_data, const T *y_data, T *out_data, int pre, int n, int post, Operator op) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < total) { int idx = tid / post % n; #if __CUDA_ARCH__ >= 350 out_data[tid] = op(__ldg(x_data + tid), __ldg(y_data + idx)); #else out_data[tid] = op(x_data[tid], y_data[idx]); #endif } } nvinfer1::Dims ElementWisePlugin::getOutputDimensions( int index, const nvinfer1::Dims *input_dims, int num_inputs) TRT_NOEXCEPT { PADDLE_ENFORCE_EQ(index, 0, platform::errors::InvalidArgument( "There is only one output in TRT elementwise " "op plugin, but got output index: %d.", index)); PADDLE_ENFORCE_EQ(num_inputs, 2, platform::errors::InvalidArgument( "There are 2 inputs in TRT elementwise " "op plugin, but got input number: %d.", num_inputs)); PADDLE_ENFORCE_NOT_NULL( input_dims, platform::errors::InvalidArgument( "The input dims of TRT elementwise op plugin should not be null.")); return input_dims[0]; } int ElementWisePlugin::initialize() TRT_NOEXCEPT { axis_ = (axis_ == -1) ? dims_x_.nbDims - dims_y_.nbDims : axis_; int trimed_nb_dims = dims_y_.nbDims; for (; trimed_nb_dims > 0; --trimed_nb_dims) { if (dims_y_.d[trimed_nb_dims - 1] != 1) { break; } } dims_y_.nbDims = trimed_nb_dims; PADDLE_ENFORCE_GE(dims_x_.nbDims, dims_y_.nbDims + axis_, platform::errors::InvalidArgument( "We expect [number of x dims] >= [number of y dims + " "axis] in TRT elementwise op plugin, but got [number " "of x dims] = %d, [number of y dims + axis] = %d.", dims_x_.nbDims, dims_y_.nbDims + axis_)); PADDLE_ENFORCE_LT( axis_, dims_x_.nbDims, platform::errors::InvalidArgument("We expect [axis] < [number of x dims] " "in TRT elementwise op plugin, but got " "[axis] = %d, [number of x dims] = %d.", axis_, dims_x_.nbDims)); prev_size_ = 1; midd_size_ = 1; post_size_ = 1; for (int i = 0; i < axis_; ++i) { prev_size_ *= dims_x_.d[i]; } for (int i = 0; i < dims_y_.nbDims; ++i) { PADDLE_ENFORCE_EQ(dims_x_.d[i + axis_], dims_y_.d[i], platform::errors::InvalidArgument( "Broadcast dimension mismatch. 
The dims of input Y " "should be a subsequence of X.")); midd_size_ *= dims_y_.d[i]; } for (int i = axis_ + dims_y_.nbDims; i < dims_x_.nbDims; ++i) { post_size_ *= dims_x_.d[i]; } return 0; } int ElementWisePlugin::enqueue(int batch_size, const void *const *inputs, #if IS_TRT_VERSION_LT(8000) void **outputs, void *workspace, #else void *const *outputs, void *workspace, #endif hipStream_t stream) TRT_NOEXCEPT { const float *x = reinterpret_cast<const float *>(inputs[0]); const float *y = reinterpret_cast<const float *>(inputs[1]); float *out = reinterpret_cast<float *>(outputs[0]); int num = batch_size * prev_size_ * midd_size_ * post_size_; int thread = 256; int block = (num + thread - 1) / thread; if (type_ == "add") { hipLaunchKernelGGL(( elementwise_kernel), dim3(block), dim3(thread), 0, stream, num, x, y, out, prev_size_, batch_size * midd_size_, post_size_, details::Add<float>()); } else if (type_ == "mul") { hipLaunchKernelGGL(( elementwise_kernel), dim3(block), dim3(thread), 0, stream, num, x, y, out, prev_size_, batch_size * midd_size_, post_size_, details::Mul<float>()); } else { PADDLE_THROW(platform::errors::Fatal( "The %s type elementwise is not implemented in trt plugin.", type_)); } return hipGetLastError() != hipSuccess; } // Dynamic Plugin below. #if IS_TRT_VERSION_GE(6000) int ElementwisePluginDynamic::initialize() TRT_NOEXCEPT { return 0; } size_t ElementwisePluginDynamic::getSerializationSize() const TRT_NOEXCEPT { return SerializedSize(type_.c_str()) + SerializedSize(axis_); } void ElementwisePluginDynamic::serialize(void *buffer) const TRT_NOEXCEPT { SerializeValue(&buffer, type_.c_str()); SerializeValue(&buffer, axis_); } nvinfer1::DimsExprs ElementwisePluginDynamic::getOutputDimensions( int output_index, const nvinfer1::DimsExprs *inputs, int nb_inputs, nvinfer1::IExprBuilder &expr_builder) TRT_NOEXCEPT { return inputs[0]; } bool ElementwisePluginDynamic::supportsFormatCombination( int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs, int nb_outputs) TRT_NOEXCEPT { PADDLE_ENFORCE_NOT_NULL( in_out, platform::errors::InvalidArgument( "The input of swish plugin shoule not be nullptr.")); PADDLE_ENFORCE_LT( pos, nb_inputs + nb_outputs, platform::errors::InvalidArgument("The pos(%d) should be less than the " "num(%d) of the input and the output.", pos, nb_inputs + nb_outputs)); (in_out && pos < (nb_inputs + nb_outputs)); const nvinfer1::PluginTensorDesc &in = in_out[pos]; if (pos == 0) { return (in.type == nvinfer1::DataType::kFLOAT) && (in.format == nvinfer1::TensorFormat::kLINEAR); } const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1]; // output return in.type == prev.type && in.format == prev.format; } nvinfer1::DataType ElementwisePluginDynamic::getOutputDataType( int index, const nvinfer1::DataType *input_types, int nb_inputs) const TRT_NOEXCEPT { PADDLE_ENFORCE_EQ(index, 0, platform::errors::InvalidArgument( "The Elementwise Plugin only has one input, so the " "index value should be 0, but get %d.", index)); return input_types[0]; } int ElementwisePluginDynamic::enqueue( const nvinfer1::PluginTensorDesc *input_desc, const nvinfer1::PluginTensorDesc *output_desc, const void *const *inputs, void *const *outputs, void *workspace, hipStream_t stream) TRT_NOEXCEPT { auto x_dims = input_desc[0].dims; auto y_dims = input_desc[1].dims; int axis = (axis_ == -1) ? 
x_dims.nbDims - y_dims.nbDims : axis_; int batch_size = x_dims.d[0]; int prev_size = 1; int midd_size = 1; int post_size = 1; for (int i = 0; i < axis; ++i) { prev_size *= x_dims.d[i]; } int trimed_nb_dims = y_dims.nbDims; for (; trimed_nb_dims > 0; --trimed_nb_dims) { if (y_dims.d[trimed_nb_dims - 1] != 1) { break; } } for (int i = 0; i < trimed_nb_dims; ++i) { PADDLE_ENFORCE_EQ(x_dims.d[i + axis], y_dims.d[i], platform::errors::InvalidArgument( "Broadcast dimension mismatch found in trt " "elementwise plugin's x and y input.")); midd_size *= y_dims.d[i]; } for (int i = axis + trimed_nb_dims; i < x_dims.nbDims; ++i) { post_size *= x_dims.d[i]; } const float *x = static_cast<const float *>(inputs[0]); const float *y = static_cast<const float *>(inputs[1]); float *out = static_cast<float *>(outputs[0]); int num = prev_size * midd_size * post_size; int thread = 256; int block = (num + thread - 1) / thread; if (type_ == "add") { hipLaunchKernelGGL(( elementwise_kernel), dim3(block), dim3(thread), 0, stream, num, x, y, out, prev_size, midd_size, post_size, details::Add<float>()); } else if (type_ == "mul") { hipLaunchKernelGGL(( elementwise_kernel), dim3(block), dim3(thread), 0, stream, num, x, y, out, prev_size, midd_size, post_size, details::Mul<float>()); } else { PADDLE_THROW(platform::errors::Unimplemented( "Paddle-TRT only support elementwise operation: {add, mul} currently, " "but got %s.", type_)); } return hipGetLastError() != hipSuccess; } #endif } // namespace plugin } // namespace tensorrt } // namespace inference } // namespace paddle
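// Note on the hipified launches above: hipLaunchKernelGGL is HIP's portable
// spelling of CUDA's triple-chevron kernel launch, which is why the .hip file
// and the .cu file that follows differ only in that syntax (and in the
// hip*/cuda* runtime calls). With a generic kernel my_kernel (a hypothetical
// name used only for illustration), the two forms are equivalent:
//
//   CUDA: my_kernel<<<grid, block, shared_bytes, stream>>>(arg0, arg1);
//   HIP:  hipLaunchKernelGGL(my_kernel, grid, block, shared_bytes, stream, arg0, arg1);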
f64ced104d6414adf9d64f1243e6c66e52190cc6.cu
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <glog/logging.h> #include "paddle/fluid/inference/tensorrt/plugin/elementwise_op_plugin.h" namespace paddle { namespace inference { namespace tensorrt { namespace plugin { namespace details { template <typename T> struct Add { __device__ T operator()(const T &a, const T &b) const { return a + b; } }; template <typename T> struct Mul { __device__ T operator()(const T &a, const T &b) const { return a * b; } }; } // namespace details template <typename T, typename Operator> __global__ void elementwise_kernel(const size_t total, const T *x_data, const T *y_data, T *out_data, int pre, int n, int post, Operator op) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < total) { int idx = tid / post % n; #if __CUDA_ARCH__ >= 350 out_data[tid] = op(__ldg(x_data + tid), __ldg(y_data + idx)); #else out_data[tid] = op(x_data[tid], y_data[idx]); #endif } } nvinfer1::Dims ElementWisePlugin::getOutputDimensions( int index, const nvinfer1::Dims *input_dims, int num_inputs) TRT_NOEXCEPT { PADDLE_ENFORCE_EQ(index, 0, platform::errors::InvalidArgument( "There is only one output in TRT elementwise " "op plugin, but got output index: %d.", index)); PADDLE_ENFORCE_EQ(num_inputs, 2, platform::errors::InvalidArgument( "There are 2 inputs in TRT elementwise " "op plugin, but got input number: %d.", num_inputs)); PADDLE_ENFORCE_NOT_NULL( input_dims, platform::errors::InvalidArgument( "The input dims of TRT elementwise op plugin should not be null.")); return input_dims[0]; } int ElementWisePlugin::initialize() TRT_NOEXCEPT { axis_ = (axis_ == -1) ? dims_x_.nbDims - dims_y_.nbDims : axis_; int trimed_nb_dims = dims_y_.nbDims; for (; trimed_nb_dims > 0; --trimed_nb_dims) { if (dims_y_.d[trimed_nb_dims - 1] != 1) { break; } } dims_y_.nbDims = trimed_nb_dims; PADDLE_ENFORCE_GE(dims_x_.nbDims, dims_y_.nbDims + axis_, platform::errors::InvalidArgument( "We expect [number of x dims] >= [number of y dims + " "axis] in TRT elementwise op plugin, but got [number " "of x dims] = %d, [number of y dims + axis] = %d.", dims_x_.nbDims, dims_y_.nbDims + axis_)); PADDLE_ENFORCE_LT( axis_, dims_x_.nbDims, platform::errors::InvalidArgument("We expect [axis] < [number of x dims] " "in TRT elementwise op plugin, but got " "[axis] = %d, [number of x dims] = %d.", axis_, dims_x_.nbDims)); prev_size_ = 1; midd_size_ = 1; post_size_ = 1; for (int i = 0; i < axis_; ++i) { prev_size_ *= dims_x_.d[i]; } for (int i = 0; i < dims_y_.nbDims; ++i) { PADDLE_ENFORCE_EQ(dims_x_.d[i + axis_], dims_y_.d[i], platform::errors::InvalidArgument( "Broadcast dimension mismatch. 
The dims of input Y " "should be a subsequence of X.")); midd_size_ *= dims_y_.d[i]; } for (int i = axis_ + dims_y_.nbDims; i < dims_x_.nbDims; ++i) { post_size_ *= dims_x_.d[i]; } return 0; } int ElementWisePlugin::enqueue(int batch_size, const void *const *inputs, #if IS_TRT_VERSION_LT(8000) void **outputs, void *workspace, #else void *const *outputs, void *workspace, #endif cudaStream_t stream) TRT_NOEXCEPT { const float *x = reinterpret_cast<const float *>(inputs[0]); const float *y = reinterpret_cast<const float *>(inputs[1]); float *out = reinterpret_cast<float *>(outputs[0]); int num = batch_size * prev_size_ * midd_size_ * post_size_; int thread = 256; int block = (num + thread - 1) / thread; if (type_ == "add") { elementwise_kernel<<<block, thread, 0, stream>>>( num, x, y, out, prev_size_, batch_size * midd_size_, post_size_, details::Add<float>()); } else if (type_ == "mul") { elementwise_kernel<<<block, thread, 0, stream>>>( num, x, y, out, prev_size_, batch_size * midd_size_, post_size_, details::Mul<float>()); } else { PADDLE_THROW(platform::errors::Fatal( "The %s type elementwise is not implemented in trt plugin.", type_)); } return cudaGetLastError() != cudaSuccess; } // Dynamic Plugin below. #if IS_TRT_VERSION_GE(6000) int ElementwisePluginDynamic::initialize() TRT_NOEXCEPT { return 0; } size_t ElementwisePluginDynamic::getSerializationSize() const TRT_NOEXCEPT { return SerializedSize(type_.c_str()) + SerializedSize(axis_); } void ElementwisePluginDynamic::serialize(void *buffer) const TRT_NOEXCEPT { SerializeValue(&buffer, type_.c_str()); SerializeValue(&buffer, axis_); } nvinfer1::DimsExprs ElementwisePluginDynamic::getOutputDimensions( int output_index, const nvinfer1::DimsExprs *inputs, int nb_inputs, nvinfer1::IExprBuilder &expr_builder) TRT_NOEXCEPT { return inputs[0]; } bool ElementwisePluginDynamic::supportsFormatCombination( int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs, int nb_outputs) TRT_NOEXCEPT { PADDLE_ENFORCE_NOT_NULL( in_out, platform::errors::InvalidArgument( "The input of swish plugin shoule not be nullptr.")); PADDLE_ENFORCE_LT( pos, nb_inputs + nb_outputs, platform::errors::InvalidArgument("The pos(%d) should be less than the " "num(%d) of the input and the output.", pos, nb_inputs + nb_outputs)); (in_out && pos < (nb_inputs + nb_outputs)); const nvinfer1::PluginTensorDesc &in = in_out[pos]; if (pos == 0) { return (in.type == nvinfer1::DataType::kFLOAT) && (in.format == nvinfer1::TensorFormat::kLINEAR); } const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1]; // output return in.type == prev.type && in.format == prev.format; } nvinfer1::DataType ElementwisePluginDynamic::getOutputDataType( int index, const nvinfer1::DataType *input_types, int nb_inputs) const TRT_NOEXCEPT { PADDLE_ENFORCE_EQ(index, 0, platform::errors::InvalidArgument( "The Elementwise Plugin only has one input, so the " "index value should be 0, but get %d.", index)); return input_types[0]; } int ElementwisePluginDynamic::enqueue( const nvinfer1::PluginTensorDesc *input_desc, const nvinfer1::PluginTensorDesc *output_desc, const void *const *inputs, void *const *outputs, void *workspace, cudaStream_t stream) TRT_NOEXCEPT { auto x_dims = input_desc[0].dims; auto y_dims = input_desc[1].dims; int axis = (axis_ == -1) ? 
x_dims.nbDims - y_dims.nbDims : axis_; int batch_size = x_dims.d[0]; int prev_size = 1; int midd_size = 1; int post_size = 1; for (int i = 0; i < axis; ++i) { prev_size *= x_dims.d[i]; } int trimed_nb_dims = y_dims.nbDims; for (; trimed_nb_dims > 0; --trimed_nb_dims) { if (y_dims.d[trimed_nb_dims - 1] != 1) { break; } } for (int i = 0; i < trimed_nb_dims; ++i) { PADDLE_ENFORCE_EQ(x_dims.d[i + axis], y_dims.d[i], platform::errors::InvalidArgument( "Broadcast dimension mismatch found in trt " "elementwise plugin's x and y input.")); midd_size *= y_dims.d[i]; } for (int i = axis + trimed_nb_dims; i < x_dims.nbDims; ++i) { post_size *= x_dims.d[i]; } const float *x = static_cast<const float *>(inputs[0]); const float *y = static_cast<const float *>(inputs[1]); float *out = static_cast<float *>(outputs[0]); int num = prev_size * midd_size * post_size; int thread = 256; int block = (num + thread - 1) / thread; if (type_ == "add") { elementwise_kernel<<<block, thread, 0, stream>>>( num, x, y, out, prev_size, midd_size, post_size, details::Add<float>()); } else if (type_ == "mul") { elementwise_kernel<<<block, thread, 0, stream>>>( num, x, y, out, prev_size, midd_size, post_size, details::Mul<float>()); } else { PADDLE_THROW(platform::errors::Unimplemented( "Paddle-TRT only support elementwise operation: {add, mul} currently, " "but got %s.", type_)); } return cudaGetLastError() != cudaSuccess; } #endif } // namespace plugin } // namespace tensorrt } // namespace inference } // namespace paddle
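// Illustrative sketch of the (pre, n, post) broadcast indexing used by
// elementwise_kernel in the plugin above: x is treated as a flattened
// [pre, n, post] volume and y as a length-n vector that is broadcast over the
// pre and post dimensions, so each element tid reads y[tid / post % n]. The
// host-side reference below restates that index math for the "add" case; the
// function name and use of std::vector are assumptions for the example.
#include <cstddef>
#include <vector>

static std::vector<float> elementwise_add_reference(const std::vector<float> &x,
                                                    const std::vector<float> &y,
                                                    int pre, int n, int post) {
    // x.size() is expected to equal pre * n * post; y.size() to equal n
    std::vector<float> out(x.size());
    for (std::size_t tid = 0; tid < x.size(); ++tid) {
        // same index math as the kernel: the middle-dimension slice this
        // element belongs to selects which y value is broadcast onto it
        std::size_t idx = tid / post % n;
        out[tid] = x[tid] + y[idx];
    }
    return out;
}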
663296914d6c0247a817be1504a1a176e973984b.hip
// !!! This is a file automatically generated by hipify!!! #include <math.h> #include <hip/hip_runtime.h> //#include <hiprand/hiprand.h> // includes random num stuff //#include <hiprand/hiprand_kernel.h> // more rand stuff #include <hip/hip_texture_types.h> #include <stdio.h> #include <stdlib.h> #include "gpu_main.h" #include "params.h" #include "geometry.h" // define texture memory texture<float, 2> texGray; texture<float, 2> texRed; texture<float, 2> texGreen; texture<float, 2> texBlue; /*************************************************************************/ int drawGeometries(GPU_Palette gpuPallete, GPU_Geometries gpuGeometries, GPU_Midi gpuMidi, AParams params) { hipLaunchKernelGGL(( dev_drawGeometries) , dim3(gpuPallete.gBlocks), dim3(gpuPallete.gThreads) , 0, 0, gpuPallete, gpuGeometries, params); return 0; } int updateGeometries(GPU_Palette gpuPallete, GPU_Geometries gpuGeometries, GPU_Midi gpuMidi, AParams params) { hipLaunchKernelGGL(( dev_updateGeometries) , dim3(gpuPallete.gBlocks), dim3(gpuPallete.gThreads) , 0, 0, gpuPallete, gpuGeometries, gpuMidi, params); return 0; } /*************************************************************************/ __global__ void dev_drawGeometries(GPU_Palette P1, GPU_Geometries G1, AParams params) { int num_geometries = G1.size; int x = threadIdx.x + (blockIdx.x * blockDim.x); int y = threadIdx.y + (blockIdx.y * blockDim.y); int vecIdx = x + (y * blockDim.x * gridDim.x); // reset working pixel to appropriate background P1.red[vecIdx] = P1.red_background[vecIdx]; P1.blue[vecIdx] = P1.blue_background[vecIdx]; P1.green[vecIdx] = P1.green_background[vecIdx]; P1.gray[vecIdx] = P1.gray_background[vecIdx]; for (int i = 0; i < num_geometries; i++) { Geometry g = G1.geometries[i]; //if(vecIdx == 0) { //printf("Printing Geometry %d\n", i); //printGeometry(g); //} if (g.displayOn == true) { switch(g.type) { case CIRCLE : drawCircle(P1, g, x, y, vecIdx, params); break; } } } } __global__ void dev_updateGeometries(GPU_Palette gpuPallete, GPU_Geometries gpuGeometries, GPU_Midi gpuMidi, AParams params) { int num_geometries = gpuGeometries.size; int x = threadIdx.x + (blockIdx.x * blockDim.x); int y = threadIdx.y + (blockIdx.y * blockDim.y); int vecIdx = x + (y * blockDim.x * gridDim.x); if (vecIdx < num_geometries) { int soundIndex = gpuMidi.trackLength * vecIdx + params.curFrame; SoundEvent curSound = gpuMidi.tracks[soundIndex]; Geometry g= gpuGeometries.geometries[vecIdx]; switch(g.type) { case CIRCLE : g = updateCircle(g, gpuPallete, curSound); gpuGeometries.geometries[vecIdx] = g; break; } } } /*************************************************************************/ /* * Updates P1's rgb values at pixelX, pixelY to reflect circle in goemetry g */ __device__ void drawCircle(GPU_Palette P1, Geometry g, int pixelX, int pixelY, int vecIdx, AParams params) { Circle c; c = g.shape.circle; int centerX, centerY; centerX = c.center.x; centerY = c.center.y; double distance; distance = sqrtf((pixelX - centerX) * (pixelX - centerX) + (pixelY - centerY) * (pixelY - centerY)); if (c.fill) { if (distance < (c.radius + g.thickness)) { if (params.afterimage) { (P1.red_background)[vecIdx] = ((P1.red_background)[vecIdx] + g.color.r) / 2; (P1.green_background)[vecIdx] = ((P1.green_background)[vecIdx] + g.color.g) / 2; (P1.blue_background)[vecIdx] = ((P1.blue_background)[vecIdx] + g.color.b) / 2; } else { (P1.red)[vecIdx] = g.color.r; (P1.green)[vecIdx] = g.color.g; (P1.blue)[vecIdx] = g.color.b; } } } else { if (abs(distance - (double) c.radius) <= g.thickness) { if 
(params.afterimage) { (P1.red_background)[vecIdx] = ((P1.red_background)[vecIdx] + g.color.r) / 2; (P1.green_background)[vecIdx] = ((P1.green_background)[vecIdx] + g.color.g) / 2; (P1.blue_background)[vecIdx] = ((P1.blue_background)[vecIdx] + g.color.b) / 2; } else { (P1.red)[vecIdx] = g.color.r; (P1.green)[vecIdx] = g.color.g; (P1.blue)[vecIdx] = g.color.b; } } } } /* * Updates and returns geometry g based properties * TODO: code here is kind of unelegant */ __device__ Geometry updateCircle(Geometry g, GPU_Palette P1, SoundEvent currentSound) { Properties p; Circle c; if (currentSound.pitch < 20 || currentSound.volume == 0){ c.radius = 0; g.shape.circle = c; g.properties = p; g.displayOn = false; p.lastSoundEvent = currentSound; return g; } c = g.shape.circle; p = g.properties; // update position c.center.x = c.center.x + p.momentumX; c.center.y = c.center.y + p.momentumY; if(c.center.x < 0 || c.center.x > P1.print_width) p.momentumX = -1 * p.momentumX; if(c.center.y < 0 || c.center.y > P1.print_height) p.momentumY = -1 * p.momentumY; //update color if (currentSound.pitch > 20 && currentSound.pitch < 110) { int scaledPitch = ( (double) currentSound.pitch - 20.0)/90.0 * 256 * 256 * 256; int rRaw, gRaw, bRaw; rRaw = (scaledPitch % 256); gRaw = ((scaledPitch / 256) % 256); bRaw = ((scaledPitch / (256 * 256)) % 256); g.color.r = ((double) rRaw) / 256.0; g.color.g = ((double) gRaw) / 256.0; g.color.b = ((double) bRaw) / 256.0; g.displayOn = true; } // update radius if (g.properties.lastSoundEvent.volume == currentSound.volume && g.properties.lastSoundEvent.pitch == currentSound.pitch) { p.timeSinceLastChanged++; } else { // volume of 200 should have radius f half the screen int VolumeToRadiusScalingFactor = ((P1.print_width + P1.print_height) /(2 * 2)) / 200; int oldRadiusBound = p.lastSoundEvent.volume * VolumeToRadiusScalingFactor; int newRadiusBound = currentSound.volume * VolumeToRadiusScalingFactor; p.min_radius = min(oldRadiusBound, newRadiusBound); p.max_radius = max(oldRadiusBound, newRadiusBound); if(oldRadiusBound < newRadiusBound) p.growthRate = 1; else p.growthRate = -1; p.timeSinceLastChanged = 0; p.lastSoundEvent = currentSound; } double newRadius; if (p.growthRate > 0 && c.radius < p.max_radius) { c.radius = c.radius + (p.max_radius - c.radius) / 2; //newRadius = p.min_radius + atanf((double) p.timeSinceLastChanged / (double) 10) * (p.max_radius - p.min_radius) * 2/3.14; //c.radius = (int) newRadius; } else if (p.growthRate < 0 && c.radius > p.min_radius) { c.radius = c.radius - (c.radius - p.max_radius) / 2; //newRadius = p.max_radius - atanf((double) p.timeSinceLastChanged / (double) 10) * (p.max_radius - p.min_radius) * 2/3.14; //c.radius = (int) newRadius; } //printf("%d \n", newRadius); g.shape.circle = c; g.properties = p; //printf("displaying thing \n"); return g; } __device__ void printGeometry(Geometry g) { switch (g.type) { case CIRCLE: printf("Cicle\n"); break; } } GPU_Palette initGPUPalette(AParams* PARAMS) { // load GPU_Palette P; P.gTPB = THREADS_PER_BLOCK; // threads per block //P.gDIM = 800; // assumes the image is 800x800 P.gWidth = PARAMS->width; P.gHeight = PARAMS->height; P.print_height = PARAMS -> print_height; P.print_width = PARAMS -> print_width; // 800x800 palette = 25x25 grid of 32x32 threadblocks P.gSize = P.gWidth * P.gHeight * sizeof(float); P.gThreads.x = P.gTPB; P.gThreads.y = P.gTPB; P.gThreads.z = 1; // 3D of threads allowed P.gBlocks.x = (P.gWidth + P.gTPB - 1) / P.gTPB; P.gBlocks.y = (P.gHeight + P.gTPB - 1) / P.gTPB; P.gBlocks.z = 1; // only 
2D of blocks allowe // allocate memory for the palette hipMalloc((void**) &P.gray, P.gSize); // black and white (avg of rgb) hipMalloc((void**) &P.red, P.gSize); // r hipMalloc((void**) &P.green, P.gSize); // g hipMalloc((void**) &P.blue, P.gSize); // b hipMalloc((void**) &P.gray_background, P.gSize); // black and white (avg of rgb) hipMalloc((void**) &P.red_background, P.gSize); // r hipMalloc((void**) &P.green_background, P.gSize); // g hipMalloc((void**) &P.blue_background, P.gSize); // b // create texture memory and bind to black and white data hipChannelFormatDesc desc = hipCreateChannelDesc<float>(); /* * Unclear if these should be bound to background memory or display memory TODO */ hipBindTexture2D(NULL, texBlue, P.blue, desc, P.gWidth, P.gHeight, sizeof(float) * P.gWidth); hipBindTexture2D(NULL, texGreen, P.green, desc, P.gWidth, P.gHeight, sizeof(float) * P.gWidth); hipBindTexture2D(NULL, texRed, P.red, desc, P.gWidth, P.gHeight, sizeof(float) * P.gWidth); hipBindTexture2D(NULL, texGray, P.gray, desc, P.gWidth, P.gHeight, sizeof(float) * P.gWidth); return P; } GPU_Geometries initGPUGeometries(int size, Geometry *cpuGeometries) { GPU_Geometries G; G.size = size; int mem_size; mem_size = size * sizeof(Geometry); hipMalloc((void **) &G.geometries, mem_size); hipMemcpy(G.geometries, cpuGeometries, mem_size, cH2D); //hipChannelFormatDesc desc = hipCreateChannelDesc<Geometry>(); //hipBindTexture(NULL, texGeometries, G.geometries, mem_size); return G; } GPU_Midi initGPUMidi(DiscreteTracks dts) { GPU_Midi m; m.numTracks = dts.numTracks; m.trackLength = dts.trackLength; int row_mem_size; row_mem_size = dts.trackLength * sizeof(SoundEvent); hipMalloc((void**) &m.tracks, dts.numTracks * row_mem_size); // flatten array and copy into CUDA for (int i = 0; i < dts.numTracks; i++) { int flatIndex = i * dts.trackLength; hipMemcpy(&(m.tracks[flatIndex]), dts.tracks[i], row_mem_size, cH2D); } return m; } /*************************************************************************/ int freeGPUPalette(GPU_Palette* P) { // free texture memory hipUnbindTexture(texGray); hipUnbindTexture(texRed); hipUnbindTexture(texGreen); hipUnbindTexture(texBlue); // free gpu memory hipFree(P->gray); hipFree(P->red); hipFree(P->green); hipFree(P->blue); return 0; } /*************************************************************************/
663296914d6c0247a817be1504a1a176e973984b.cu
#include <math.h> #include <cuda.h> //#include <curand.h> // includes random num stuff //#include <curand_kernel.h> // more rand stuff #include <cuda_texture_types.h> #include <stdio.h> #include <stdlib.h> #include "gpu_main.h" #include "params.h" #include "geometry.h" // define texture memory texture<float, 2> texGray; texture<float, 2> texRed; texture<float, 2> texGreen; texture<float, 2> texBlue; /*************************************************************************/ int drawGeometries(GPU_Palette gpuPallete, GPU_Geometries gpuGeometries, GPU_Midi gpuMidi, AParams params) { dev_drawGeometries <<< gpuPallete.gBlocks, gpuPallete.gThreads >>> (gpuPallete, gpuGeometries, params); return 0; } int updateGeometries(GPU_Palette gpuPallete, GPU_Geometries gpuGeometries, GPU_Midi gpuMidi, AParams params) { dev_updateGeometries <<< gpuPallete.gBlocks, gpuPallete.gThreads >>> (gpuPallete, gpuGeometries, gpuMidi, params); return 0; } /*************************************************************************/ __global__ void dev_drawGeometries(GPU_Palette P1, GPU_Geometries G1, AParams params) { int num_geometries = G1.size; int x = threadIdx.x + (blockIdx.x * blockDim.x); int y = threadIdx.y + (blockIdx.y * blockDim.y); int vecIdx = x + (y * blockDim.x * gridDim.x); // reset working pixel to appropriate background P1.red[vecIdx] = P1.red_background[vecIdx]; P1.blue[vecIdx] = P1.blue_background[vecIdx]; P1.green[vecIdx] = P1.green_background[vecIdx]; P1.gray[vecIdx] = P1.gray_background[vecIdx]; for (int i = 0; i < num_geometries; i++) { Geometry g = G1.geometries[i]; //if(vecIdx == 0) { //printf("Printing Geometry %d\n", i); //printGeometry(g); //} if (g.displayOn == true) { switch(g.type) { case CIRCLE : drawCircle(P1, g, x, y, vecIdx, params); break; } } } } __global__ void dev_updateGeometries(GPU_Palette gpuPallete, GPU_Geometries gpuGeometries, GPU_Midi gpuMidi, AParams params) { int num_geometries = gpuGeometries.size; int x = threadIdx.x + (blockIdx.x * blockDim.x); int y = threadIdx.y + (blockIdx.y * blockDim.y); int vecIdx = x + (y * blockDim.x * gridDim.x); if (vecIdx < num_geometries) { int soundIndex = gpuMidi.trackLength * vecIdx + params.curFrame; SoundEvent curSound = gpuMidi.tracks[soundIndex]; Geometry g= gpuGeometries.geometries[vecIdx]; switch(g.type) { case CIRCLE : g = updateCircle(g, gpuPallete, curSound); gpuGeometries.geometries[vecIdx] = g; break; } } } /*************************************************************************/ /* * Updates P1's rgb values at pixelX, pixelY to reflect circle in goemetry g */ __device__ void drawCircle(GPU_Palette P1, Geometry g, int pixelX, int pixelY, int vecIdx, AParams params) { Circle c; c = g.shape.circle; int centerX, centerY; centerX = c.center.x; centerY = c.center.y; double distance; distance = sqrtf((pixelX - centerX) * (pixelX - centerX) + (pixelY - centerY) * (pixelY - centerY)); if (c.fill) { if (distance < (c.radius + g.thickness)) { if (params.afterimage) { (P1.red_background)[vecIdx] = ((P1.red_background)[vecIdx] + g.color.r) / 2; (P1.green_background)[vecIdx] = ((P1.green_background)[vecIdx] + g.color.g) / 2; (P1.blue_background)[vecIdx] = ((P1.blue_background)[vecIdx] + g.color.b) / 2; } else { (P1.red)[vecIdx] = g.color.r; (P1.green)[vecIdx] = g.color.g; (P1.blue)[vecIdx] = g.color.b; } } } else { if (abs(distance - (double) c.radius) <= g.thickness) { if (params.afterimage) { (P1.red_background)[vecIdx] = ((P1.red_background)[vecIdx] + g.color.r) / 2; (P1.green_background)[vecIdx] = ((P1.green_background)[vecIdx] + 
g.color.g) / 2; (P1.blue_background)[vecIdx] = ((P1.blue_background)[vecIdx] + g.color.b) / 2; } else { (P1.red)[vecIdx] = g.color.r; (P1.green)[vecIdx] = g.color.g; (P1.blue)[vecIdx] = g.color.b; } } } } /* * Updates and returns geometry g based properties * TODO: code here is kind of unelegant */ __device__ Geometry updateCircle(Geometry g, GPU_Palette P1, SoundEvent currentSound) { Properties p; Circle c; if (currentSound.pitch < 20 || currentSound.volume == 0){ c.radius = 0; g.shape.circle = c; g.properties = p; g.displayOn = false; p.lastSoundEvent = currentSound; return g; } c = g.shape.circle; p = g.properties; // update position c.center.x = c.center.x + p.momentumX; c.center.y = c.center.y + p.momentumY; if(c.center.x < 0 || c.center.x > P1.print_width) p.momentumX = -1 * p.momentumX; if(c.center.y < 0 || c.center.y > P1.print_height) p.momentumY = -1 * p.momentumY; //update color if (currentSound.pitch > 20 && currentSound.pitch < 110) { int scaledPitch = ( (double) currentSound.pitch - 20.0)/90.0 * 256 * 256 * 256; int rRaw, gRaw, bRaw; rRaw = (scaledPitch % 256); gRaw = ((scaledPitch / 256) % 256); bRaw = ((scaledPitch / (256 * 256)) % 256); g.color.r = ((double) rRaw) / 256.0; g.color.g = ((double) gRaw) / 256.0; g.color.b = ((double) bRaw) / 256.0; g.displayOn = true; } // update radius if (g.properties.lastSoundEvent.volume == currentSound.volume && g.properties.lastSoundEvent.pitch == currentSound.pitch) { p.timeSinceLastChanged++; } else { // volume of 200 should have radius f half the screen int VolumeToRadiusScalingFactor = ((P1.print_width + P1.print_height) /(2 * 2)) / 200; int oldRadiusBound = p.lastSoundEvent.volume * VolumeToRadiusScalingFactor; int newRadiusBound = currentSound.volume * VolumeToRadiusScalingFactor; p.min_radius = min(oldRadiusBound, newRadiusBound); p.max_radius = max(oldRadiusBound, newRadiusBound); if(oldRadiusBound < newRadiusBound) p.growthRate = 1; else p.growthRate = -1; p.timeSinceLastChanged = 0; p.lastSoundEvent = currentSound; } double newRadius; if (p.growthRate > 0 && c.radius < p.max_radius) { c.radius = c.radius + (p.max_radius - c.radius) / 2; //newRadius = p.min_radius + atanf((double) p.timeSinceLastChanged / (double) 10) * (p.max_radius - p.min_radius) * 2/3.14; //c.radius = (int) newRadius; } else if (p.growthRate < 0 && c.radius > p.min_radius) { c.radius = c.radius - (c.radius - p.max_radius) / 2; //newRadius = p.max_radius - atanf((double) p.timeSinceLastChanged / (double) 10) * (p.max_radius - p.min_radius) * 2/3.14; //c.radius = (int) newRadius; } //printf("%d \n", newRadius); g.shape.circle = c; g.properties = p; //printf("displaying thing \n"); return g; } __device__ void printGeometry(Geometry g) { switch (g.type) { case CIRCLE: printf("Cicle\n"); break; } } GPU_Palette initGPUPalette(AParams* PARAMS) { // load GPU_Palette P; P.gTPB = THREADS_PER_BLOCK; // threads per block //P.gDIM = 800; // assumes the image is 800x800 P.gWidth = PARAMS->width; P.gHeight = PARAMS->height; P.print_height = PARAMS -> print_height; P.print_width = PARAMS -> print_width; // 800x800 palette = 25x25 grid of 32x32 threadblocks P.gSize = P.gWidth * P.gHeight * sizeof(float); P.gThreads.x = P.gTPB; P.gThreads.y = P.gTPB; P.gThreads.z = 1; // 3D of threads allowed P.gBlocks.x = (P.gWidth + P.gTPB - 1) / P.gTPB; P.gBlocks.y = (P.gHeight + P.gTPB - 1) / P.gTPB; P.gBlocks.z = 1; // only 2D of blocks allowe // allocate memory for the palette cudaMalloc((void**) &P.gray, P.gSize); // black and white (avg of rgb) cudaMalloc((void**) &P.red, P.gSize); 
// r cudaMalloc((void**) &P.green, P.gSize); // g cudaMalloc((void**) &P.blue, P.gSize); // b cudaMalloc((void**) &P.gray_background, P.gSize); // black and white (avg of rgb) cudaMalloc((void**) &P.red_background, P.gSize); // r cudaMalloc((void**) &P.green_background, P.gSize); // g cudaMalloc((void**) &P.blue_background, P.gSize); // b // create texture memory and bind to black and white data cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>(); /* * Unclear if these should be bound to background memory or display memory TODO */ cudaBindTexture2D(NULL, texBlue, P.blue, desc, P.gWidth, P.gHeight, sizeof(float) * P.gWidth); cudaBindTexture2D(NULL, texGreen, P.green, desc, P.gWidth, P.gHeight, sizeof(float) * P.gWidth); cudaBindTexture2D(NULL, texRed, P.red, desc, P.gWidth, P.gHeight, sizeof(float) * P.gWidth); cudaBindTexture2D(NULL, texGray, P.gray, desc, P.gWidth, P.gHeight, sizeof(float) * P.gWidth); return P; } GPU_Geometries initGPUGeometries(int size, Geometry *cpuGeometries) { GPU_Geometries G; G.size = size; int mem_size; mem_size = size * sizeof(Geometry); cudaMalloc((void **) &G.geometries, mem_size); cudaMemcpy(G.geometries, cpuGeometries, mem_size, cH2D); //cudaChannelFormatDesc desc = cudaCreateChannelDesc<Geometry>(); //cudaBindTexture(NULL, texGeometries, G.geometries, mem_size); return G; } GPU_Midi initGPUMidi(DiscreteTracks dts) { GPU_Midi m; m.numTracks = dts.numTracks; m.trackLength = dts.trackLength; int row_mem_size; row_mem_size = dts.trackLength * sizeof(SoundEvent); cudaMalloc((void**) &m.tracks, dts.numTracks * row_mem_size); // flatten array and copy into CUDA for (int i = 0; i < dts.numTracks; i++) { int flatIndex = i * dts.trackLength; cudaMemcpy(&(m.tracks[flatIndex]), dts.tracks[i], row_mem_size, cH2D); } return m; } /*************************************************************************/ int freeGPUPalette(GPU_Palette* P) { // free texture memory cudaUnbindTexture(texGray); cudaUnbindTexture(texRed); cudaUnbindTexture(texGreen); cudaUnbindTexture(texBlue); // free gpu memory cudaFree(P->gray); cudaFree(P->red); cudaFree(P->green); cudaFree(P->blue); return 0; } /*************************************************************************/
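// Illustrative sketch of the row-by-row flattening done in initGPUMidi above:
// an array of numTracks host row pointers (each trackLength elements long) is
// packed into a single contiguous device allocation so a kernel can index it
// as tracks[track * trackLength + frame]. The helper name and template form
// below are assumptions for the example, not part of the file above.
#include <cuda_runtime.h>

template <typename T>
T *flattenToDevice(T **hostRows, int numRows, int rowLen) {
    T *deviceFlat = nullptr;
    size_t rowBytes = rowLen * sizeof(T);
    cudaMalloc((void **)&deviceFlat, numRows * rowBytes);
    for (int r = 0; r < numRows; ++r) {
        // copy row r of the host array into its slot of the flat device buffer
        cudaMemcpy(deviceFlat + r * rowLen, hostRows[r], rowBytes, cudaMemcpyHostToDevice);
    }
    return deviceFlat;
}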
2b47acaa4ad2c67b8bc625d0bf0c9db8eee8aa85.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "cuda/dcn_v2_im2col_cuda.h" #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <THH/THH.h> #include <THH/THHAtomics.cuh> #include <THH/THHDeviceUtils.cuh> // extern THCState *state; THCState *state = at::globalContext().lazyInitCUDA(); // author: Charles Shang // https://github.com/torch/cunn/blob/master/lib/THCUNN/generic/SpatialConvolutionMM.cu // [batch gemm] // https://github.com/pytorch/pytorch/blob/master/aten/src/THC/generic/THCTensorMathBlas.cu __global__ void createBatchGemmBuffer(const float **input_b, float **output_b, float **columns_b, const float **ones_b, const float **weight_b, const float **bias_b, float *input, float *output, float *columns, float *ones, float *weight, float *bias, const int input_stride, const int output_stride, const int columns_stride, const int ones_stride, const int num_batches) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_batches) { input_b[idx] = input + idx * input_stride; output_b[idx] = output + idx * output_stride; columns_b[idx] = columns + idx * columns_stride; ones_b[idx] = ones + idx * ones_stride; // share weights and bias within a Mini-Batch weight_b[idx] = weight; bias_b[idx] = bias; } } at::Tensor dcn_v2_cuda_forward(const at::Tensor &input, const at::Tensor &weight, const at::Tensor &bias, const at::Tensor &offset, const at::Tensor &mask, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const int deformable_group) { using scalar_t = float; // THCAssertSameGPU(THCudaTensor_checkGPU(state, 5, input, weight, bias, offset, mask)); AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor"); AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor"); AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor"); AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor"); const int batch = input.size(0); const int channels = input.size(1); const int height = input.size(2); const int width = input.size(3); const int channels_out = weight.size(0); const int channels_kernel = weight.size(1); const int kernel_h_ = weight.size(2); const int kernel_w_ = weight.size(3); // printf("Kernels: %d %d %d %d\n", kernel_h_, kernel_w_, kernel_w, kernel_h); // printf("Channels: %d %d\n", channels, channels_kernel); // printf("Channels: %d %d\n", channels_out, channels_kernel); AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w, "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_); AT_ASSERTM(channels == channels_kernel, "Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel); const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; auto ones = at::ones({batch, height_out, width_out}, input.options()); auto columns = at::empty({batch, channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options()); auto output = at::empty({batch, channels_out, height_out, width_out}, input.options()); // prepare for batch-wise computing, which is significantly faster than instance-wise computing // when batch size is large. 
// launch batch threads int matrices_size = batch * sizeof(float *); auto input_b = static_cast<const float **>(THCudaMalloc(state, matrices_size)); auto output_b = static_cast<float **>(THCudaMalloc(state, matrices_size)); auto columns_b = static_cast<float **>(THCudaMalloc(state, matrices_size)); auto ones_b = static_cast<const float **>(THCudaMalloc(state, matrices_size)); auto weight_b = static_cast<const float **>(THCudaMalloc(state, matrices_size)); auto bias_b = static_cast<const float **>(THCudaMalloc(state, matrices_size)); const int block = 128; const int grid = (batch + block - 1) / block; hipLaunchKernelGGL(( createBatchGemmBuffer), dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input_b, output_b, columns_b, ones_b, weight_b, bias_b, input.data<scalar_t>(), output.data<scalar_t>(), columns.data<scalar_t>(), ones.data<scalar_t>(), weight.data<scalar_t>(), bias.data<scalar_t>(), channels * width * height, channels_out * width_out * height_out, channels * kernel_h * kernel_w * height_out * width_out, height_out * width_out, batch); long m_ = channels_out; long n_ = height_out * width_out; long k_ = 1; THCudaBlas_SgemmBatched(state, 't', 'n', n_, m_, k_, 1.0f, ones_b, k_, bias_b, k_, 0.0f, output_b, n_, batch); modulated_deformable_im2col_cuda(c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input.data<scalar_t>(), offset.data<scalar_t>(), mask.data<scalar_t>(), batch, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, columns.data<scalar_t>()); long m = channels_out; long n = height_out * width_out; long k = channels * kernel_h * kernel_w; THCudaBlas_SgemmBatched(state, 'n', 'n', n, m, k, 1.0f, (const float **)columns_b, n, weight_b, k, 1.0f, output_b, n, batch); THCudaFree(state, input_b); THCudaFree(state, output_b); THCudaFree(state, columns_b); THCudaFree(state, ones_b); THCudaFree(state, weight_b); THCudaFree(state, bias_b); return output; } __global__ void createBatchGemmBufferBackward( float **grad_output_b, float **columns_b, float **ones_b, float **weight_b, float **grad_weight_b, float **grad_bias_b, float *grad_output, float *columns, float *ones, float *weight, float *grad_weight, float *grad_bias, const int grad_output_stride, const int columns_stride, const int ones_stride, const int num_batches) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_batches) { grad_output_b[idx] = grad_output + idx * grad_output_stride; columns_b[idx] = columns + idx * columns_stride; ones_b[idx] = ones + idx * ones_stride; // share weights and bias within a Mini-Batch weight_b[idx] = weight; grad_weight_b[idx] = grad_weight; grad_bias_b[idx] = grad_bias; } } std::vector<at::Tensor> dcn_v2_cuda_backward(const at::Tensor &input, const at::Tensor &weight, const at::Tensor &bias, const at::Tensor &offset, const at::Tensor &mask, const at::Tensor &grad_output, int kernel_h, int kernel_w, int stride_h, int stride_w, int pad_h, int pad_w, int dilation_h, int dilation_w, int deformable_group) { THArgCheck(input.is_contiguous(), 1, "input tensor has to be contiguous"); THArgCheck(weight.is_contiguous(), 2, "weight tensor has to be contiguous"); AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor"); AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor"); AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor"); AT_ASSERTM(mask.type().is_cuda(), "mask 
must be a CUDA tensor"); const int batch = input.size(0); const int channels = input.size(1); const int height = input.size(2); const int width = input.size(3); const int channels_out = weight.size(0); const int channels_kernel = weight.size(1); const int kernel_h_ = weight.size(2); const int kernel_w_ = weight.size(3); AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w, "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_); AT_ASSERTM(channels == channels_kernel, "Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel); const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; auto ones = at::ones({height_out, width_out}, input.options()); auto columns = at::empty({channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options()); auto output = at::empty({batch, channels_out, height_out, width_out}, input.options()); auto grad_input = at::zeros_like(input); auto grad_weight = at::zeros_like(weight); auto grad_bias = at::zeros_like(bias); auto grad_offset = at::zeros_like(offset); auto grad_mask = at::zeros_like(mask); using scalar_t = float; for (int b = 0; b < batch; b++) { auto input_n = input.select(0, b); auto offset_n = offset.select(0, b); auto mask_n = mask.select(0, b); auto grad_output_n = grad_output.select(0, b); auto grad_input_n = grad_input.select(0, b); auto grad_offset_n = grad_offset.select(0, b); auto grad_mask_n = grad_mask.select(0, b); long m = channels * kernel_h * kernel_w; long n = height_out * width_out; long k = channels_out; THCudaBlas_Sgemm(state, 'n', 't', n, m, k, 1.0f, grad_output_n.data<scalar_t>(), n, weight.data<scalar_t>(), m, 0.0f, columns.data<scalar_t>(), n); // gradient w.r.t. input coordinate data modulated_deformable_col2im_coord_cuda(c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), columns.data<scalar_t>(), input_n.data<scalar_t>(), offset_n.data<scalar_t>(), mask_n.data<scalar_t>(), 1, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, grad_offset_n.data<scalar_t>(), grad_mask_n.data<scalar_t>()); // gradient w.r.t. input data modulated_deformable_col2im_cuda(c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), columns.data<scalar_t>(), offset_n.data<scalar_t>(), mask_n.data<scalar_t>(), 1, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, grad_input_n.data<scalar_t>()); // gradient w.r.t. weight, dWeight should accumulate across the batch and group modulated_deformable_im2col_cuda(c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input_n.data<scalar_t>(), offset_n.data<scalar_t>(), mask_n.data<scalar_t>(), 1, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, columns.data<scalar_t>()); long m_ = channels_out; long n_ = channels * kernel_h * kernel_w; long k_ = height_out * width_out; THCudaBlas_Sgemm(state, 't', 'n', n_, m_, k_, 1.0f, columns.data<scalar_t>(), k_, grad_output_n.data<scalar_t>(), k_, 1.0f, grad_weight.data<scalar_t>(), n_); // gradient w.r.t. 
bias // long m_ = channels_out; // long k__ = height_out * width_out; THCudaBlas_Sgemv(state, 't', k_, m_, 1.0f, grad_output_n.data<scalar_t>(), k_, ones.data<scalar_t>(), 1, 1.0f, grad_bias.data<scalar_t>(), 1); } return { grad_input, grad_offset, grad_mask, grad_weight, grad_bias }; }
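// Illustrative sketch, separate from the files in this pair: the systematic difference
// between the .hip file above and the .cu file below is the kernel-launch spelling (plus
// the cuda* -> hip* runtime renames). hipify rewrites the CUDA triple-chevron launch into
// a hipLaunchKernelGGL call with the same grid, block, shared-memory and stream arguments.
// scaleKernel and its arguments are placeholders, not code from either file.
#include <cuda_runtime.h>

__global__ void scaleKernel(float *data, float alpha, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= alpha;
}

void launchScale(float *d_data, int n, cudaStream_t stream) {
  const int block = 128;
  const int grid = (n + block - 1) / block;
  // CUDA spelling (as in the .cu file of this pair):
  scaleKernel<<<grid, block, 0, stream>>>(d_data, 2.0f, n);
  // HIP spelling emitted by hipify for the same launch (as in the .hip file above):
  //   hipLaunchKernelGGL(scaleKernel, dim3(grid), dim3(block), 0, stream, d_data, 2.0f, n);
}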
2b47acaa4ad2c67b8bc625d0bf0c9db8eee8aa85.cu
#include <vector> #include "cuda/dcn_v2_im2col_cuda.h" #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THC.h> #include <THC/THCAtomics.cuh> #include <THC/THCDeviceUtils.cuh> // extern THCState *state; THCState *state = at::globalContext().lazyInitCUDA(); // author: Charles Shang // https://github.com/torch/cunn/blob/master/lib/THCUNN/generic/SpatialConvolutionMM.cu // [batch gemm] // https://github.com/pytorch/pytorch/blob/master/aten/src/THC/generic/THCTensorMathBlas.cu __global__ void createBatchGemmBuffer(const float **input_b, float **output_b, float **columns_b, const float **ones_b, const float **weight_b, const float **bias_b, float *input, float *output, float *columns, float *ones, float *weight, float *bias, const int input_stride, const int output_stride, const int columns_stride, const int ones_stride, const int num_batches) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_batches) { input_b[idx] = input + idx * input_stride; output_b[idx] = output + idx * output_stride; columns_b[idx] = columns + idx * columns_stride; ones_b[idx] = ones + idx * ones_stride; // share weights and bias within a Mini-Batch weight_b[idx] = weight; bias_b[idx] = bias; } } at::Tensor dcn_v2_cuda_forward(const at::Tensor &input, const at::Tensor &weight, const at::Tensor &bias, const at::Tensor &offset, const at::Tensor &mask, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const int deformable_group) { using scalar_t = float; // THCAssertSameGPU(THCudaTensor_checkGPU(state, 5, input, weight, bias, offset, mask)); AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor"); AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor"); AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor"); AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor"); const int batch = input.size(0); const int channels = input.size(1); const int height = input.size(2); const int width = input.size(3); const int channels_out = weight.size(0); const int channels_kernel = weight.size(1); const int kernel_h_ = weight.size(2); const int kernel_w_ = weight.size(3); // printf("Kernels: %d %d %d %d\n", kernel_h_, kernel_w_, kernel_w, kernel_h); // printf("Channels: %d %d\n", channels, channels_kernel); // printf("Channels: %d %d\n", channels_out, channels_kernel); AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w, "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_); AT_ASSERTM(channels == channels_kernel, "Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel); const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; auto ones = at::ones({batch, height_out, width_out}, input.options()); auto columns = at::empty({batch, channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options()); auto output = at::empty({batch, channels_out, height_out, width_out}, input.options()); // prepare for batch-wise computing, which is significantly faster than instance-wise computing // when batch size is large. 
// launch batch threads int matrices_size = batch * sizeof(float *); auto input_b = static_cast<const float **>(THCudaMalloc(state, matrices_size)); auto output_b = static_cast<float **>(THCudaMalloc(state, matrices_size)); auto columns_b = static_cast<float **>(THCudaMalloc(state, matrices_size)); auto ones_b = static_cast<const float **>(THCudaMalloc(state, matrices_size)); auto weight_b = static_cast<const float **>(THCudaMalloc(state, matrices_size)); auto bias_b = static_cast<const float **>(THCudaMalloc(state, matrices_size)); const int block = 128; const int grid = (batch + block - 1) / block; createBatchGemmBuffer<<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>( input_b, output_b, columns_b, ones_b, weight_b, bias_b, input.data<scalar_t>(), output.data<scalar_t>(), columns.data<scalar_t>(), ones.data<scalar_t>(), weight.data<scalar_t>(), bias.data<scalar_t>(), channels * width * height, channels_out * width_out * height_out, channels * kernel_h * kernel_w * height_out * width_out, height_out * width_out, batch); long m_ = channels_out; long n_ = height_out * width_out; long k_ = 1; THCudaBlas_SgemmBatched(state, 't', 'n', n_, m_, k_, 1.0f, ones_b, k_, bias_b, k_, 0.0f, output_b, n_, batch); modulated_deformable_im2col_cuda(c10::cuda::getCurrentCUDAStream(), input.data<scalar_t>(), offset.data<scalar_t>(), mask.data<scalar_t>(), batch, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, columns.data<scalar_t>()); long m = channels_out; long n = height_out * width_out; long k = channels * kernel_h * kernel_w; THCudaBlas_SgemmBatched(state, 'n', 'n', n, m, k, 1.0f, (const float **)columns_b, n, weight_b, k, 1.0f, output_b, n, batch); THCudaFree(state, input_b); THCudaFree(state, output_b); THCudaFree(state, columns_b); THCudaFree(state, ones_b); THCudaFree(state, weight_b); THCudaFree(state, bias_b); return output; } __global__ void createBatchGemmBufferBackward( float **grad_output_b, float **columns_b, float **ones_b, float **weight_b, float **grad_weight_b, float **grad_bias_b, float *grad_output, float *columns, float *ones, float *weight, float *grad_weight, float *grad_bias, const int grad_output_stride, const int columns_stride, const int ones_stride, const int num_batches) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_batches) { grad_output_b[idx] = grad_output + idx * grad_output_stride; columns_b[idx] = columns + idx * columns_stride; ones_b[idx] = ones + idx * ones_stride; // share weights and bias within a Mini-Batch weight_b[idx] = weight; grad_weight_b[idx] = grad_weight; grad_bias_b[idx] = grad_bias; } } std::vector<at::Tensor> dcn_v2_cuda_backward(const at::Tensor &input, const at::Tensor &weight, const at::Tensor &bias, const at::Tensor &offset, const at::Tensor &mask, const at::Tensor &grad_output, int kernel_h, int kernel_w, int stride_h, int stride_w, int pad_h, int pad_w, int dilation_h, int dilation_w, int deformable_group) { THArgCheck(input.is_contiguous(), 1, "input tensor has to be contiguous"); THArgCheck(weight.is_contiguous(), 2, "weight tensor has to be contiguous"); AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor"); AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor"); AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor"); AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor"); const int batch = input.size(0); const 
int channels = input.size(1); const int height = input.size(2); const int width = input.size(3); const int channels_out = weight.size(0); const int channels_kernel = weight.size(1); const int kernel_h_ = weight.size(2); const int kernel_w_ = weight.size(3); AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w, "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_); AT_ASSERTM(channels == channels_kernel, "Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel); const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; auto ones = at::ones({height_out, width_out}, input.options()); auto columns = at::empty({channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options()); auto output = at::empty({batch, channels_out, height_out, width_out}, input.options()); auto grad_input = at::zeros_like(input); auto grad_weight = at::zeros_like(weight); auto grad_bias = at::zeros_like(bias); auto grad_offset = at::zeros_like(offset); auto grad_mask = at::zeros_like(mask); using scalar_t = float; for (int b = 0; b < batch; b++) { auto input_n = input.select(0, b); auto offset_n = offset.select(0, b); auto mask_n = mask.select(0, b); auto grad_output_n = grad_output.select(0, b); auto grad_input_n = grad_input.select(0, b); auto grad_offset_n = grad_offset.select(0, b); auto grad_mask_n = grad_mask.select(0, b); long m = channels * kernel_h * kernel_w; long n = height_out * width_out; long k = channels_out; THCudaBlas_Sgemm(state, 'n', 't', n, m, k, 1.0f, grad_output_n.data<scalar_t>(), n, weight.data<scalar_t>(), m, 0.0f, columns.data<scalar_t>(), n); // gradient w.r.t. input coordinate data modulated_deformable_col2im_coord_cuda(c10::cuda::getCurrentCUDAStream(), columns.data<scalar_t>(), input_n.data<scalar_t>(), offset_n.data<scalar_t>(), mask_n.data<scalar_t>(), 1, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, grad_offset_n.data<scalar_t>(), grad_mask_n.data<scalar_t>()); // gradient w.r.t. input data modulated_deformable_col2im_cuda(c10::cuda::getCurrentCUDAStream(), columns.data<scalar_t>(), offset_n.data<scalar_t>(), mask_n.data<scalar_t>(), 1, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, grad_input_n.data<scalar_t>()); // gradient w.r.t. weight, dWeight should accumulate across the batch and group modulated_deformable_im2col_cuda(c10::cuda::getCurrentCUDAStream(), input_n.data<scalar_t>(), offset_n.data<scalar_t>(), mask_n.data<scalar_t>(), 1, channels, height, width, height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, deformable_group, columns.data<scalar_t>()); long m_ = channels_out; long n_ = channels * kernel_h * kernel_w; long k_ = height_out * width_out; THCudaBlas_Sgemm(state, 't', 'n', n_, m_, k_, 1.0f, columns.data<scalar_t>(), k_, grad_output_n.data<scalar_t>(), k_, 1.0f, grad_weight.data<scalar_t>(), n_); // gradient w.r.t. bias // long m_ = channels_out; // long k__ = height_out * width_out; THCudaBlas_Sgemv(state, 't', k_, m_, 1.0f, grad_output_n.data<scalar_t>(), k_, ones.data<scalar_t>(), 1, 1.0f, grad_bias.data<scalar_t>(), 1); } return { grad_input, grad_offset, grad_mask, grad_weight, grad_bias }; }
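// Illustrative sketch, not part of the pair above: both the forward and backward paths
// compute the output spatial size with the standard dilated-convolution formula
//   out = (in + 2*pad - (dilation*(kernel - 1) + 1)) / stride + 1.
// A small host-side helper with two worked examples; convOutSize is a placeholder name.
inline int convOutSize(int in, int pad, int kernel, int dilation, int stride) {
  return (in + 2 * pad - (dilation * (kernel - 1) + 1)) / stride + 1;
}
// convOutSize(56, /*pad=*/1, /*kernel=*/3, /*dilation=*/1, /*stride=*/1) == 56  (same-size 3x3)
// convOutSize(56, /*pad=*/0, /*kernel=*/3, /*dilation=*/1, /*stride=*/2) == 27  (strided 3x3)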
d86cc264a502957d6f8ff92ae563232f6d7fe053.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * (C) Copyright 2020, 2021 IBM. All Rights Reserved. * * This code is licensed under the Apache License, Version 2.0. You may * obtain a copy of this license in the LICENSE.txt file in the root directory * of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. * * Any modifications or derivative works of this code must retain this * copyright notice, and modified files need to carry a notice indicating * that they have been altered from the originals. */ #include "io_iterator.h" #include "rpu_pulsed_meta_parameter.h" #include "rpucuda_mixedprec_device.h" #include <hipcub/hipcub.hpp> #include <memory> namespace RPU { /******************************************************************************************/ /* MixedPrecRPUDeviceCuda CUDA implementation of MixedPrecRPUDevice */ template <typename T> MixedPrecRPUDeviceCuda<T>::MixedPrecRPUDeviceCuda(CudaContext *c, int x_size, int d_size) : MixedPrecRPUDeviceBaseCuda<T>(c, x_size, d_size){}; template <typename T> MixedPrecRPUDeviceCuda<T>::MixedPrecRPUDeviceCuda( CudaContext *c, const MixedPrecRPUDevice<T> &rpu_device) : MixedPrecRPUDeviceCuda<T>(c, rpu_device.getXSize(), rpu_device.getDSize()) { populateFrom(rpu_device); }; template <typename T> void MixedPrecRPUDeviceCuda<T>::allocateContainers() { this->context_->synchronizeDevice(); dev_chi_ = RPU::make_unique<CudaArray<T>>(this->context_, this->size_); nblocks_batch_max_ = this->context_->getSMCount() * (this->context_->maxThreadsPerBlock() / this->context_->getNThreads()); } // copy template <typename T> MixedPrecRPUDeviceCuda<T>::MixedPrecRPUDeviceCuda(const MixedPrecRPUDeviceCuda<T> &other) : MixedPrecRPUDeviceBaseCuda<T>(other) { allocateContainers(); dev_chi_->assign(*other.dev_chi_); this->context_->synchronize(); }; template <typename T> MixedPrecRPUDeviceCuda<T> & MixedPrecRPUDeviceCuda<T>::operator=(const MixedPrecRPUDeviceCuda<T> &other) { MixedPrecRPUDeviceCuda<T> tmp(other); swap(*this, tmp); return *this; }; template <typename T> MixedPrecRPUDeviceCuda<T>::MixedPrecRPUDeviceCuda(MixedPrecRPUDeviceCuda<T> &&other) { *this = std::move(other); }; template <typename T> MixedPrecRPUDeviceCuda<T> &MixedPrecRPUDeviceCuda<T>::operator=(MixedPrecRPUDeviceCuda<T> &&other) { MixedPrecRPUDeviceBaseCuda<T>::operator=(std::move(other)); dev_chi_ = std::move(other.dev_chi_); nblocks_batch_max_ = nblocks_batch_max_; return *this; } template <typename T> void MixedPrecRPUDeviceCuda<T>::populateFrom(const AbstractRPUDevice<T> &rpu_device_in) { const auto &rpu_device = dynamic_cast<const MixedPrecRPUDevice<T> &>(rpu_device_in); if (&rpu_device == nullptr) { RPU_FATAL("populateFrom expects MixedPrecRPUDevice."); } MixedPrecRPUDeviceBaseCuda<T>::populateFrom(rpu_device_in); // will set sizes allocateContainers(); const auto &par = this->getPar(); std::vector<T> v; v.resize(this->size_); rpu_device.getChi(v.data()); dev_chi_->assign(v.data()); // both in x-major this->context_->synchronize(); } template <typename T> __global__ void kernelQuantizeBatch( T *quantized_values, const T *values, const T *nm_values, const int n_bins, const int size_in, const int m_batch_in, const bool trans_in) { volatile unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x; int size = size_in; int m_batch = m_batch_in; int total_size = size * m_batch; bool trans = trans_in; T half_bins = (T)(n_bins / 2); // floor T res = (T)1.0 / ((T)half_bins); T value; int total_threads = blockDim.x * gridDim.x; for (int 
i_stride = 0; i_stride < total_size; i_stride += total_threads) { int idx = i_stride + tid; if (idx < total_size) { value = values[idx]; int sidx = trans ? (idx % m_batch) : (idx / size); T amax = nm_values[sidx]; // amax from noise management value = amax > 0.0 ? value / amax : value; value = RPU_ROUNDFUN(value / res); value = MIN(MAX(value, -half_bins), half_bins) * amax * res; quantized_values[idx] = value; } } } template <typename T> const T *MixedPrecRPUDeviceCuda<T>::quantize( T *buffer_values, const T *values, RPU::NoiseManager<T> *nm, int n_bins, int size, int m_batch, bool trans) { if (n_bins <= 0) { return values; } nm->compute(values, NoiseManagementType::AbsMax, this->io_, m_batch, trans, false); int nthreads = this->context_->getNThreads(); int nblocks = this->context_->getNBlocks(m_batch * size, nthreads); int nblocks_batch = MIN(nblocks_batch_max_, nblocks); hipStream_t s = this->context_->getStream(); hipLaunchKernelGGL(( kernelQuantizeBatch), dim3(nblocks_batch), dim3(nthreads), 0, s, buffer_values, values, nm->getScaleValues(), n_bins, size, m_batch, trans); return buffer_values; } template <typename T> void MixedPrecRPUDeviceCuda<T>::doDirectUpdate( const T *x_input, const T *d_input, T *dev_weights, const T lr, const int m_batch, const bool x_trans, const bool d_trans, const T beta, const PulsedUpdateMetaParameter<T> &up, T *x_buffer, T *d_buffer) { if (beta != 1.0f) { RPU_FATAL("beta not equal 1 is not supported.") } this->setUpPar(up); const auto &par = getPar(); const T *d_val = quantize( d_buffer, d_input, &*this->noise_manager_d_, par.n_d_bins, this->d_size_, m_batch, d_trans); // % Quantize x const T *x_val = quantize( x_buffer, x_input, &*this->noise_manager_x_, par.n_x_bins, this->x_size_, m_batch, x_trans); // dev_chi is x-size (row) major !! (to facilitate the readout below) if (m_batch == 1) { RPU::math::ger<T>( this->context_, this->x_size_, this->d_size_, lr, x_val, 1, d_val, 1, dev_chi_->getData(), this->x_size_); } else { RPU::math::gemm<T>( this->context_, x_trans, !d_trans, this->x_size_, this->d_size_, m_batch, lr, x_val, x_trans ? m_batch : this->x_size_, d_val, d_trans ? m_batch : this->d_size_, 1.0, // set beta to 1.0. 
We want to add to Chi dev_chi_->getData(), this->x_size_); } this->doTransfer(dev_weights, par.transfer_lr, m_batch); this->computeSparsity(x_buffer, d_buffer, m_batch); this->advanceUpdateCounter(m_batch); } template <typename T> __global__ void kernelMixedPrecTransfer(T *transfer_out, T *chi, const int size, const T granularity_) { volatile unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < size) { T value = chi[tid]; T dw = truncf(value / granularity_); transfer_out[tid] = dw; chi[tid] = value - granularity_ * dw; } } template <typename T> void MixedPrecRPUDeviceCuda<T>::forwardUpdate( T *dev_weights, const T lr, int i_row_start, const T *transfer_vec, const int n_vec, const bool trans) { if (!lr) { return; } T t_size = n_vec * this->x_size_; if ((this->dev_transfer_tmp_ == nullptr) || this->dev_transfer_tmp_->getSize() < t_size) { this->dev_transfer_tmp_ = RPU::make_unique<CudaArray<T>>(this->context_, t_size); } const auto &par = this->getPar(); int nthreads = this->context_->getNThreads(); int nblocks = this->context_->getNBlocks(t_size, nthreads); hipLaunchKernelGGL(( kernelMixedPrecTransfer<T>), dim3(nblocks), dim3(nthreads), 0, this->context_->getStream(), this->dev_transfer_tmp_->getData(), dev_chi_->getData() + i_row_start * this->x_size_, t_size, this->granularity_); // requires to turn on update_managment / bl managment as well this->transfer_pwu_->update( this->dev_transfer_tmp_->getDataConst(), // this is the transfer vector (x_size) transfer_vec, // this should be d_size, non-trans dev_weights, &*this->rpucuda_device_, this->up_, lr * this->granularity_, n_vec, trans, false); } template <typename T> std::vector<T> MixedPrecRPUDeviceCuda<T>::getHiddenWeights() const { auto data = MixedPrecRPUDeviceBaseCuda<T>::getHiddenWeights(); int offset = data.size(); data.resize(offset + this->size_); dev_chi_->copyTo(data.data() + offset); return data; } template class MixedPrecRPUDeviceCuda<float>; #ifdef RPU_USE_DOUBLE template class MixedPrecRPUDeviceCuda<double>; #endif } // namespace RPU
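// Illustrative sketch, not part of the file above: a host-side reference for the per-value
// math in kernelQuantizeBatch, assuming RPU_ROUNDFUN is round-to-nearest. Each value is
// normalized by its batch sample's absolute maximum (from noise management), snapped to one
// of n_bins levels, clipped, and rescaled. quantizeOne is a placeholder name.
#include <algorithm>
#include <cmath>

float quantizeOne(float value, float amax, int n_bins) {
  const float half_bins = (float)(n_bins / 2);       // integer division: floor, as in the kernel
  const float res = 1.0f / half_bins;                // quantization step on the [-1, 1] range
  float v = (amax > 0.0f) ? value / amax : value;    // normalize by the per-sample abs-max scale
  v = std::round(v / res);                           // snap to an integer bin index
  v = std::min(std::max(v, -half_bins), half_bins);  // clip to the representable bins
  return v * amax * res;                             // map back to the original scale
}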
d86cc264a502957d6f8ff92ae563232f6d7fe053.cu
/** * (C) Copyright 2020, 2021 IBM. All Rights Reserved. * * This code is licensed under the Apache License, Version 2.0. You may * obtain a copy of this license in the LICENSE.txt file in the root directory * of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. * * Any modifications or derivative works of this code must retain this * copyright notice, and modified files need to carry a notice indicating * that they have been altered from the originals. */ #include "io_iterator.h" #include "rpu_pulsed_meta_parameter.h" #include "rpucuda_mixedprec_device.h" #include <cub/cub.cuh> #include <memory> namespace RPU { /******************************************************************************************/ /* MixedPrecRPUDeviceCuda CUDA implementation of MixedPrecRPUDevice */ template <typename T> MixedPrecRPUDeviceCuda<T>::MixedPrecRPUDeviceCuda(CudaContext *c, int x_size, int d_size) : MixedPrecRPUDeviceBaseCuda<T>(c, x_size, d_size){}; template <typename T> MixedPrecRPUDeviceCuda<T>::MixedPrecRPUDeviceCuda( CudaContext *c, const MixedPrecRPUDevice<T> &rpu_device) : MixedPrecRPUDeviceCuda<T>(c, rpu_device.getXSize(), rpu_device.getDSize()) { populateFrom(rpu_device); }; template <typename T> void MixedPrecRPUDeviceCuda<T>::allocateContainers() { this->context_->synchronizeDevice(); dev_chi_ = RPU::make_unique<CudaArray<T>>(this->context_, this->size_); nblocks_batch_max_ = this->context_->getSMCount() * (this->context_->maxThreadsPerBlock() / this->context_->getNThreads()); } // copy template <typename T> MixedPrecRPUDeviceCuda<T>::MixedPrecRPUDeviceCuda(const MixedPrecRPUDeviceCuda<T> &other) : MixedPrecRPUDeviceBaseCuda<T>(other) { allocateContainers(); dev_chi_->assign(*other.dev_chi_); this->context_->synchronize(); }; template <typename T> MixedPrecRPUDeviceCuda<T> & MixedPrecRPUDeviceCuda<T>::operator=(const MixedPrecRPUDeviceCuda<T> &other) { MixedPrecRPUDeviceCuda<T> tmp(other); swap(*this, tmp); return *this; }; template <typename T> MixedPrecRPUDeviceCuda<T>::MixedPrecRPUDeviceCuda(MixedPrecRPUDeviceCuda<T> &&other) { *this = std::move(other); }; template <typename T> MixedPrecRPUDeviceCuda<T> &MixedPrecRPUDeviceCuda<T>::operator=(MixedPrecRPUDeviceCuda<T> &&other) { MixedPrecRPUDeviceBaseCuda<T>::operator=(std::move(other)); dev_chi_ = std::move(other.dev_chi_); nblocks_batch_max_ = nblocks_batch_max_; return *this; } template <typename T> void MixedPrecRPUDeviceCuda<T>::populateFrom(const AbstractRPUDevice<T> &rpu_device_in) { const auto &rpu_device = dynamic_cast<const MixedPrecRPUDevice<T> &>(rpu_device_in); if (&rpu_device == nullptr) { RPU_FATAL("populateFrom expects MixedPrecRPUDevice."); } MixedPrecRPUDeviceBaseCuda<T>::populateFrom(rpu_device_in); // will set sizes allocateContainers(); const auto &par = this->getPar(); std::vector<T> v; v.resize(this->size_); rpu_device.getChi(v.data()); dev_chi_->assign(v.data()); // both in x-major this->context_->synchronize(); } template <typename T> __global__ void kernelQuantizeBatch( T *quantized_values, const T *values, const T *nm_values, const int n_bins, const int size_in, const int m_batch_in, const bool trans_in) { volatile unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x; int size = size_in; int m_batch = m_batch_in; int total_size = size * m_batch; bool trans = trans_in; T half_bins = (T)(n_bins / 2); // floor T res = (T)1.0 / ((T)half_bins); T value; int total_threads = blockDim.x * gridDim.x; for (int i_stride = 0; i_stride < total_size; i_stride += total_threads) { int idx = i_stride + tid; if 
(idx < total_size) { value = values[idx]; int sidx = trans ? (idx % m_batch) : (idx / size); T amax = nm_values[sidx]; // amax from noise management value = amax > 0.0 ? value / amax : value; value = RPU_ROUNDFUN(value / res); value = MIN(MAX(value, -half_bins), half_bins) * amax * res; quantized_values[idx] = value; } } } template <typename T> const T *MixedPrecRPUDeviceCuda<T>::quantize( T *buffer_values, const T *values, RPU::NoiseManager<T> *nm, int n_bins, int size, int m_batch, bool trans) { if (n_bins <= 0) { return values; } nm->compute(values, NoiseManagementType::AbsMax, this->io_, m_batch, trans, false); int nthreads = this->context_->getNThreads(); int nblocks = this->context_->getNBlocks(m_batch * size, nthreads); int nblocks_batch = MIN(nblocks_batch_max_, nblocks); cudaStream_t s = this->context_->getStream(); kernelQuantizeBatch<<<nblocks_batch, nthreads, 0, s>>>( buffer_values, values, nm->getScaleValues(), n_bins, size, m_batch, trans); return buffer_values; } template <typename T> void MixedPrecRPUDeviceCuda<T>::doDirectUpdate( const T *x_input, const T *d_input, T *dev_weights, const T lr, const int m_batch, const bool x_trans, const bool d_trans, const T beta, const PulsedUpdateMetaParameter<T> &up, T *x_buffer, T *d_buffer) { if (beta != 1.0f) { RPU_FATAL("beta not equal 1 is not supported.") } this->setUpPar(up); const auto &par = getPar(); const T *d_val = quantize( d_buffer, d_input, &*this->noise_manager_d_, par.n_d_bins, this->d_size_, m_batch, d_trans); // % Quantize x const T *x_val = quantize( x_buffer, x_input, &*this->noise_manager_x_, par.n_x_bins, this->x_size_, m_batch, x_trans); // dev_chi is x-size (row) major !! (to facilitate the readout below) if (m_batch == 1) { RPU::math::ger<T>( this->context_, this->x_size_, this->d_size_, lr, x_val, 1, d_val, 1, dev_chi_->getData(), this->x_size_); } else { RPU::math::gemm<T>( this->context_, x_trans, !d_trans, this->x_size_, this->d_size_, m_batch, lr, x_val, x_trans ? m_batch : this->x_size_, d_val, d_trans ? m_batch : this->d_size_, 1.0, // set beta to 1.0. 
We want to add to Chi dev_chi_->getData(), this->x_size_); } this->doTransfer(dev_weights, par.transfer_lr, m_batch); this->computeSparsity(x_buffer, d_buffer, m_batch); this->advanceUpdateCounter(m_batch); } template <typename T> __global__ void kernelMixedPrecTransfer(T *transfer_out, T *chi, const int size, const T granularity_) { volatile unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < size) { T value = chi[tid]; T dw = truncf(value / granularity_); transfer_out[tid] = dw; chi[tid] = value - granularity_ * dw; } } template <typename T> void MixedPrecRPUDeviceCuda<T>::forwardUpdate( T *dev_weights, const T lr, int i_row_start, const T *transfer_vec, const int n_vec, const bool trans) { if (!lr) { return; } T t_size = n_vec * this->x_size_; if ((this->dev_transfer_tmp_ == nullptr) || this->dev_transfer_tmp_->getSize() < t_size) { this->dev_transfer_tmp_ = RPU::make_unique<CudaArray<T>>(this->context_, t_size); } const auto &par = this->getPar(); int nthreads = this->context_->getNThreads(); int nblocks = this->context_->getNBlocks(t_size, nthreads); kernelMixedPrecTransfer<T><<<nblocks, nthreads, 0, this->context_->getStream()>>>( this->dev_transfer_tmp_->getData(), dev_chi_->getData() + i_row_start * this->x_size_, t_size, this->granularity_); // requires to turn on update_managment / bl managment as well this->transfer_pwu_->update( this->dev_transfer_tmp_->getDataConst(), // this is the transfer vector (x_size) transfer_vec, // this should be d_size, non-trans dev_weights, &*this->rpucuda_device_, this->up_, lr * this->granularity_, n_vec, trans, false); } template <typename T> std::vector<T> MixedPrecRPUDeviceCuda<T>::getHiddenWeights() const { auto data = MixedPrecRPUDeviceBaseCuda<T>::getHiddenWeights(); int offset = data.size(); data.resize(offset + this->size_); dev_chi_->copyTo(data.data() + offset); return data; } template class MixedPrecRPUDeviceCuda<float>; #ifdef RPU_USE_DOUBLE template class MixedPrecRPUDeviceCuda<double>; #endif } // namespace RPU
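// Illustrative sketch, not part of the pair above: kernelQuantizeBatch caps its grid at
// nblocks_batch_max_ and lets each thread step through the data by blockDim.x * gridDim.x.
// The canonical grid-stride loop below expresses the same pattern; gridStrideScale and its
// arguments are placeholders.
__global__ void gridStrideScale(float *data, float alpha, int n) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
    data[i] *= alpha;   // each thread handles several elements when the grid is smaller than n
  }
}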
75e820f51a7a858f2b3e3cf40753aac1b4c60c2c.hip
// !!! This is a file automatically generated by hipify!!! /* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma ([email protected]) // #include <ops/declarable/helpers/convolutions.h> #include "cudnnUtils.h" namespace sd { namespace ops { namespace platforms { ////////////////////////////////////////////////////////////////////////// static void depthwiseConv2dCUDNN(const LaunchContext* context, const NDArray* input, const NDArray* weights, const NDArray* bias, NDArray* output, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int paddingMode, const bool isNCHW) { // cudnn supports only following case: mC = 1, oC = iC (groupCount == iC) // input [bS, iC, iH, iW] nchw or [bS, iH, iW, iC] nhwc // weights [iC, mC, kH, kW] // bias [oC], may be nullptr // output [bS, oC, oH, oW] nchw or [bS, oH, oW, oC] nhwc // oC = iC*mC int bS, iC, iH, iW, mC, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width; int indIOioC, indIiH, indWmC, indWiC, indWkH, indOoH; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, 0, *input, *output, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWmC, indWkH, indOoH); mC = weights->sizeAt(1); auto handle = reinterpret_cast<cudnnHandle_t*>(context->getCuDnnHandle()); CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnSetStream), cudnnSetStream(*handle, *context->getCudaStream())); cudnnTensorFormat_t format = isNCHW ? 
CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC; PointersManager manager(context, __func__); // input descriptor CudnnTensor x; if (input->ews() == 1 && input->ordering() == 'c') x.set4D(format, cudnnDataType(input->dataType()), bS, iC, iH, iW); else x.set4DEx(cudnnDataType(input->dataType()), bS, iC, iH, iW, input->strideAt(0), input->strideAt(indIOioC), input->strideAt(indIiH), input->strideAt(indIiH + 1)); // weights descriptor FilterDesc w; w.set4D(cudnnDataType(weights->dataType()), CUDNN_TENSOR_NCHW, iC, mC, kH, kW); // output descriptor CudnnTensor z; if (output->ews() == 1 && output->ordering() == 'c') z.set4D(format, cudnnDataType(output->dataType()), bS, oC, oH, oW); else z.set4DEx(cudnnDataType(output->dataType()), bS, oC, oH, oW, output->strideAt(0), output->strideAt(indIOioC), output->strideAt(indOoH), output->strideAt(indOoH + 1)); // description of convolution ConvolutionDesc conv; conv.set2D(pH, pW, sH, sW, dH, dW, CUDNN_CROSS_CORRELATION, cudnnDataType(output->dataType())); CHECK_CUDNN_FAILURE_MSG( STRINGIZE(cudnnSetConvolutionGroupCount), cudnnSetConvolutionGroupCount( conv, iC)); // set number of groups (depthwise mode) in description of convolution, groupCount == iC // algorithm description cudnnConvolutionFwdAlgo_t algo; cudnnConvolutionFwdAlgoPerf_t algoPerf; int count = 0; // CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnGetConvolutionForwardAlgorithm), cudnnGetConvolutionForwardAlgorithm( // *handle, x, w, conv, z, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnFindConvolutionForwardAlgorithm), cudnnFindConvolutionForwardAlgorithm(*handle, x, w, conv, z, 1, &count, &algoPerf)); if (count == 0) throw sd::cuda_exception::build("depthwiseConv2dCUDNN: cudnnGetConvolutionForwardAlgorithm failed", 0); algo = algoPerf.algo; // allocate auxiliary device memory, abbreviation ws means workspace size_t wsSize; CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnGetConvolutionForwardWorkspaceSize), cudnnGetConvolutionForwardWorkspaceSize(*handle, x, w, conv, z, algo, &wsSize)); void* wsData = manager.allocateDevMem(wsSize); // provide scaling parameters const float alpha32(1), beta32(0); const double alpha64(1), beta64(0); const void* alpha = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64); const void* beta = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64); NDArray::prepareSpecialUse({output}, {input, weights, bias}); // run calculation CHECK_CUDNN_FAILURE_MSG( STRINGIZE(cudnnConvolutionForward), cudnnConvolutionForward(*handle, alpha, x, input->specialBuffer(), w, weights->specialBuffer(), conv, algo, wsData, wsSize, beta, z, output->specialBuffer())); // add bias if it is present if (bias != nullptr) { CudnnTensor b; // b.set( format, cudnnDataType(bias->dataType()), 1, isNCHW ? bias->lengthOf() : 1, 1, isNCHW ? 
1: // bias->lengthOf()); b.set4D(CUDNN_TENSOR_NCHW, cudnnDataType(bias->dataType()), 1, oC, 1, 1); CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnAddTensor), cudnnAddTensor(*handle, alpha, b, bias->specialBuffer(), alpha, z, output->specialBuffer())); } // cudaErr = hipStreamSynchronize(*context->getCudaStream()); // if (cudaErr != 0) // throw cuda_exception::build("depthwiseConv2dCUDNN: hipStreamSynchronize failed !", cudaErr); } ////////////////////////////////////////////////////////////////////////// static void depthwiseConv2dBpCUDNN(const LaunchContext* context, const NDArray* input, const NDArray* weights, const NDArray* gradO, NDArray* gradI, NDArray* gradW, NDArray* gradB, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int paddingMode, const bool isNCHW) { // cudnn supports only following case: mC = 1, oC = iC (groupCount == iC) // input, gradI [bS, iC, iH, iW] nchw or [bS, iH, iW, iC] nhwc // weights, gradW [iC, mC, kH, kW] // gradB [oC], may be nullptr // gradO [bS, oC, oH, oW] nchw or [bS, oH, oW, oC] nhwc // oC = iC*mC int bS, iC, iH, iW, mC, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width; int indIOioC, indIiH, indWmC, indWiC, indWkH, indOoH; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, 0, *input, *gradO, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWmC, indWkH, indOoH); mC = weights->sizeAt(1); auto handle = reinterpret_cast<cudnnHandle_t*>(context->getCuDnnHandle()); CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnSetStream), cudnnSetStream(*handle, *context->getCudaStream())); cudnnTensorFormat_t format = isNCHW ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC; PointersManager manager(context, __func__); // input descriptor CudnnTensor x; if (input->ews() == 1 && input->ordering() == 'c') x.set4D(format, cudnnDataType(input->dataType()), bS, iC, iH, iW); else x.set4DEx(cudnnDataType(input->dataType()), bS, iC, iH, iW, input->strideAt(0), input->strideAt(indIOioC), input->strideAt(indIiH), input->strideAt(indIiH + 1)); // gradO descriptor CudnnTensor dz; if (gradO->ews() == 1 && gradO->ordering() == 'c') dz.set4D(format, cudnnDataType(gradO->dataType()), bS, oC, oH, oW); else dz.set4DEx(cudnnDataType(gradO->dataType()), bS, oC, oH, oW, gradO->strideAt(0), gradO->strideAt(indIOioC), gradO->strideAt(indOoH), gradO->strideAt(indOoH + 1)); // gradI descriptor CudnnTensor dx; if (gradI->ews() == 1 && gradI->ordering() == 'c') dx.set4D(format, cudnnDataType(gradI->dataType()), bS, iC, iH, iW); else dx.set4DEx(cudnnDataType(gradI->dataType()), bS, iC, iH, iW, gradI->strideAt(0), gradI->strideAt(indIOioC), gradI->strideAt(indIiH), gradI->strideAt(indIiH + 1)); // gradW descriptor FilterDesc dw; dw.set4D(cudnnDataType(gradW->dataType()), CUDNN_TENSOR_NCHW, iC, mC, kH, kW); // description of convolution ConvolutionDesc conv; conv.set2D(pH, pW, sH, sW, dH, dW, CUDNN_CROSS_CORRELATION, cudnnDataType(gradO->dataType())); CHECK_CUDNN_FAILURE_MSG( STRINGIZE(cudnnSetConvolutionGroupCount), cudnnSetConvolutionGroupCount( conv, iC)); // set number of groups (depthwise mode) in description of convolution, groupCount == iC // gradW algorithm description cudnnConvolutionBwdFilterAlgo_t algoGradW; cudnnConvolutionBwdFilterAlgoPerf_t algoGradWPerf; int count = 0; // CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnGetConvolutionBackwardFilterAlgorithm), // cudnnGetConvolutionBackwardFilterAlgorithm( *handle, x, dz, conv, dw, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 
// 0, &algoGradW)); CHECK_CUDNN_FAILURE_MSG( STRINGIZE(cudnnFindConvolutionBackwardFilterAlgorithm), cudnnFindConvolutionBackwardFilterAlgorithm(*handle, x, dz, conv, dw, 1, &count, &algoGradWPerf)); if (count == 0) throw sd::cuda_exception::build( "depthwiseConv2dBpCUDNN: cudnnGetConvolutionBackwardFilterAlgorithm failed as the count is 0 ", 0); algoGradW = algoGradWPerf.algo; // gradI algorithm description cudnnConvolutionBwdDataAlgo_t algoGradI; cudnnConvolutionBwdDataAlgoPerf_t algoGradIPerf; // CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnGetConvolutionBackwardDataAlgorithm), // cudnnGetConvolutionBackwardDataAlgorithm( *handle, dw, dz, conv, x, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, // &algoGradI)); CHECK_CUDNN_FAILURE_MSG( STRINGIZE(cudnnFindConvolutionBackwardDataAlgorithm), cudnnFindConvolutionBackwardDataAlgorithm(*handle, dw, dz, conv, x, 1, &count, &algoGradIPerf)); if (count == 0) throw sd::cuda_exception::build( "depthwiseConv2dBpCUDNN: cudnnGetConvolutionBackwardDataAlgorithm failed as the count is 0 ", 0); algoGradI = algoGradIPerf.algo; // allocate auxiliary device memory for gradW calculation, abbreviation ws means workspace size_t wsGradWSize; CHECK_CUDNN_FAILURE_MSG( STRINGIZE(cudnnGetConvolutionBackwardFilterWorkspaceSize), cudnnGetConvolutionBackwardFilterWorkspaceSize(*handle, x, dz, conv, dw, algoGradW, &wsGradWSize)); void* wsGradWData = manager.allocateDevMem(wsGradWSize); // allocate auxiliary device memory for gradI calculation, abbreviation ws means workspace size_t wsGradISize; CHECK_CUDNN_FAILURE_MSG( STRINGIZE(cudnnGetConvolutionBackwardDataWorkspaceSize), cudnnGetConvolutionBackwardDataWorkspaceSize(*handle, dw, dz, conv, dx, algoGradI, &wsGradISize)); void* wsGradIData = manager.allocateDevMem(wsGradISize); // provide scaling parameters const float alpha32(1), beta32(0); const double alpha64(1), beta64(0); const void* alpha = gradO->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64); const void* beta = gradO->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64); NDArray::prepareSpecialUse({gradI, gradW, gradB}, {input, weights, gradO}); // run calculation for gradB (if not nullptr) if (gradB != nullptr) { CudnnTensor db; // db.set( format, cudnnDataType(gradB->dataType()), 1, isNCHW ? gradB->lengthOf() : 1, 1, isNCHW ? 
1: // gradB->lengthOf()); db.set4D(CUDNN_TENSOR_NCHW, cudnnDataType(gradB->dataType()), 1, oC, 1, 1); CHECK_CUDNN_FAILURE_MSG( STRINGIZE(cudnnConvolutionBackwardBias), cudnnConvolutionBackwardBias(*handle, alpha, dz, gradO->specialBuffer(), beta, db, gradB->specialBuffer())); } // run calculation for gradW CHECK_CUDNN_FAILURE_MSG( STRINGIZE(cudnnConvolutionBackwardFilter), cudnnConvolutionBackwardFilter(*handle, alpha, x, input->specialBuffer(), dz, gradO->specialBuffer(), conv, algoGradW, wsGradWData, wsGradWSize, beta, dw, gradW->specialBuffer())); // run calculation for gradI CHECK_CUDNN_FAILURE_MSG( STRINGIZE(cudnnConvolutionBackwardData), cudnnConvolutionBackwardData(*handle, alpha, dw, weights->specialBuffer(), dz, gradO->specialBuffer(), conv, algoGradI, wsGradIData, wsGradISize, beta, dx, gradI->specialBuffer())); // cudaErr = hipStreamSynchronize(*context->getCudaStream()); // if (cudaErr != 0) // throw cuda_exception::build("depthwiseConv2dBpCUDNN: hipStreamSynchronize failed !", cudaErr); NDArray::registerSpecialUse({gradI, gradW, gradB}, {input, weights, gradO}); } ////////////////////////////////////////////////////////////////////////// PLATFORM_IMPL(depthwise_conv2d, ENGINE_CUDA) { auto input = INPUT_VARIABLE(0); // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW) auto weights = INPUT_VARIABLE(1); // [kH, kW, iC, mC], [mC, iC, kH, kW], [mC, kH, kW, iC] auto bias = block.width() > 2 ? INPUT_VARIABLE(2) : nullptr; // [oC] = iC*mC auto output = OUTPUT_VARIABLE(0); // [bS, oH, oW, iC*mC] (NHWC) or [bS, iC*mC, oH, oW] (NCHW) REQUIRE_TRUE(input->rankOf() == 4, 0, "DEPTHWISECONV2D CUDNN OP: rank of input array must be equal to 4, but got %i instead !", input->rankOf()); REQUIRE_TRUE(weights->rankOf() == 4, 0, "DEPTHWISECONV2D CUDNN OP: rank of weights array must be equal to 4, but got %i instead !", weights->rankOf()); int kH = INT_ARG(0) > 0 ? INT_ARG(0) : static_cast<int>(weights->sizeAt(0)); // filter(kernel) height int kW = INT_ARG(1) > 0 ? INT_ARG(1) : static_cast<int>(weights->sizeAt(1)); // filter(kernel) width int sH = INT_ARG(2); // strides height int sW = INT_ARG(3); // strides width int pH = INT_ARG(4); // paddings height int pW = INT_ARG(5); // paddings width int dH = INT_ARG(6); // dilations height int dW = INT_ARG(7); // dilations width int paddingMode = INT_ARG(8); // 0-VALID, 1-SAME int isNCHW = block.getIArguments()->size() > 9 ? !INT_ARG(9) : 1; // INT_ARG(9): 0-NCHW, 1-NHWC int wFormat = block.getIArguments()->size() > 10 ? 
INT_ARG(10) : 0; // 0 - [kH, kW, iC, mC], 1 - [mC, iC, kH, kW], 2 - [mC, kH, kW, iC] int bS, iC, iH, iW, mC, oC, oH, oW; // batch size, input channels, input height/width, channels multiplier(oC = // iC*mC), output channels, output height/width int indIOioC, indIiH, indWmC, indWiC, indWkH, indOoH; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, wFormat, *input, *output, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWmC, indWkH, indOoH); mC = weights->sizeAt(indWmC); // channels multiplier ConvolutionUtils::calcPadding2D(pH, pW, oH, oW, iH, iW, kH, kW, sH, sW, dH, dW, paddingMode); std::vector<sd::LongType> expectedWeightsShape = ConvolutionUtils::expectWeightsShape(wFormat, kH, kW, iC, mC); REQUIRE_TRUE(weights->isSameShape(expectedWeightsShape), 0, "DEPTHWISECONV2D CUDNN OP: wrong shape of weights array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expectedWeightsShape).c_str(), ShapeUtils::shapeAsString(weights).c_str()); REQUIRE_TRUE( output->sizeAt(indIOioC) == iC * mC, 0, "DEPTHWISECONV2D CUDNN OP: the output_channels must be equal to input_channels * channels_multiplier = %i !", iC * mC); if (bias) REQUIRE_TRUE(bias->rankOf() <= 2 && oC == bias->lengthOf(), 0, "DEPTHWISECONV2D CUDNN OP: wrong shape of array with biases, expected rank, length: <=2, %i, but got " "%i, %i instead !", oC, bias->rankOf(), bias->lengthOf()); std::vector<int> wPermut; // cudnn support format {oC, iC/groupCount, kH, kW} only, mC = 1, oC = iC (groupCount == // iC) that is {iC, mC, kH, kW} in our case if (0 == wFormat) wPermut = {2, 3, 0, 1}; // kH, kW, iC, mC -> iC, mC, kH, kW else if (1 == wFormat) wPermut = {1, 0, 2, 3}; // mC, iC, kH, kW -> iC, mC, kH, kW else wPermut = {3, 0, 1, 2}; // mC, kH, kW, iC -> iC, mC, kH, kW std::unique_ptr<NDArray> uNewWeights( new NDArray(weights->ordering(), {iC, mC, kH, kW}, weights->dataType(), weights->getContext())); uNewWeights->assign(weights->permute(wPermut)); std::unique_ptr<NDArray> tmpInput = {}; if (paddingMode == 1) { // in same paddingMode cudnn doesn't support asymmetric left/right top/bottopm paddings auto ret = checkConv2dCUDNNPadAsymmetric(input, nullptr, iH, iW, oH, oW, kH, kW, sH, sW, pH, pW, dH, dW, isNCHW); tmpInput = std::move(std::get<0>(ret)); if (tmpInput) input = tmpInput.get(); } depthwiseConv2dCUDNN(block.launchContext(), input, uNewWeights.get(), bias, output, kH, kW, sH, sW, pH, pW, dH, dW, paddingMode, isNCHW); return sd::Status::OK; } ////////////////////////////////////////////////////////////////////////// PLATFORM_CHECK(depthwise_conv2d, ENGINE_CUDA) { auto input = INPUT_VARIABLE(0); // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW) auto weights = INPUT_VARIABLE(1); // [kH, kW, iC, mC], [mC, iC, kH, kW], [mC, kH, kW, iC] auto bias = block.width() > 2 ? INPUT_VARIABLE(2) : nullptr; // [oC] = iC*mC const int paddingMode = INT_ARG(8); // 0-VALID, 1-SAME, 2-CAUSAL const int wFormat = block.getIArguments()->size() > 10 ? INT_ARG(10) : 0; // 0 - [kH, kW, iC, mC], 1 - [mC, iC, kH, kW], 2 - [mC, kH, kW, iC] Requirements req("CUDNN DEPTHWISE_CONV2d OP"); req.expectNotEq(makeInfoVariable(paddingMode, "paddingMode"), 2) && req.expectEq(makeInfoVariable(weights->sizeAt(0 == wFormat ? 
3 : 0), "weights#mC"), 1) && req.expectIn(makeInfoVariable(input->dataType(), TYPE_MSG_INPUT0), {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}) && req.expectIn(makeInfoVariable(weights->dataType(), TYPE_MSG_INPUT1), {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}); if (bias) { req.expectIn(makeInfoVariable(bias->dataType(), TYPE_MSG_INPUT_ "#bias"), {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}); } req.logTheSuccess(); return req; } ////////////////////////////////////////////////////////////////////////// PLATFORM_IMPL(depthwise_conv2d_bp, ENGINE_CUDA) { auto input = INPUT_VARIABLE(0); // [bS, iH, iW, iC] (NDHWC) or [bS, iC, iH, iW] (NCDHW) auto weights = INPUT_VARIABLE(1); // [kH, kW, iC, mC], [mC, iC, kH, kW], [mC, kH, kW, iC] auto bias = block.width() > 3 ? INPUT_VARIABLE(2) : nullptr; // [oC] = [iC*mC] auto gradO = block.width() > 3 ? INPUT_VARIABLE(3) : INPUT_VARIABLE(2); // [bS, oH, oW, oC] (NDHWC) or [bS, oC, oH, oW] (NCDHW), epsilon_next auto gradI = OUTPUT_VARIABLE(0); // [bS, iH, iW, iC] (NDHWC) or [bS, iC, iH, iW] (NCDHW), epsilon auto gradW = OUTPUT_VARIABLE(1); // [kH, kW, iC, mC], [mC, iC, kH, kW], [mC, kH, kW, iC] auto gradB = block.width() > 3 ? OUTPUT_VARIABLE(2) : nullptr; // [oC] REQUIRE_TRUE(input->rankOf() == 4, 0, "DEPTHWISECONV2D_BP CUDNN OP: rank of input array must be equal to 4, but got %i instead !", input->rankOf()); REQUIRE_TRUE(weights->rankOf() == 4, 0, "DEPTHWISECONV2D_BP CUDNN OP: rank of weights array must be equal to 4, but got %i instead !", weights->rankOf()); REQUIRE_TRUE(gradO->rankOf() == 4, 0, "DEPTHWISECONV2D_BP CUDNN OP: rank of output gradients (next epsilon) array must be equal to 4, but got " "%i instead !", gradO->rankOf()); int kH = INT_ARG(0) > 0 ? INT_ARG(0) : static_cast<int>(weights->sizeAt(0)); // filter(kernel) height int kW = INT_ARG(1) > 0 ? INT_ARG(1) : static_cast<int>(weights->sizeAt(1)); // filter(kernel) width int sH = INT_ARG(2); // strides height int sW = INT_ARG(3); // strides width int pH = INT_ARG(4); // paddings height int pW = INT_ARG(5); // paddings width int dH = INT_ARG(6); // dilations height int dW = INT_ARG(7); // dilations width int paddingMode = INT_ARG(8); // 0-VALID, 1-SAME int isNCHW = block.getIArguments()->size() > 9 ? !INT_ARG(9) : 1; // INT_ARG(9): 1-NHWC, 0-NCHW int wFormat = block.getIArguments()->size() > 10 ? 
INT_ARG(10) : 0; // 0 - [kH, kW, iC, mC], 1 - [mC, iC, kH, kW], 2 - [mC, kH, kW, iC] int bS, iC, iH, iW, mC, oC, oH, oW; // batch size, input channels, input height/width, channels multiplier(oC = // iC*mC), output channels, output height/width int indIOioC, indIiH, indWmC, indWiC, indWkH, indOoH; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, wFormat, *input, *gradO, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWmC, indWkH, indOoH); mC = weights->sizeAt(indWmC); // channels multiplier int trueoH, trueoW; // correct output height, width ConvolutionUtils::calcOutSizePool2D(trueoH, trueoW, kH, kW, sH, sW, pH, pW, dH, dW, iH, iW, paddingMode); ConvolutionUtils::calcPadding2D(pH, pW, oH, oW, iH, iW, kH, kW, sH, sW, dH, dW, paddingMode); std::vector<sd::LongType> expectedGradOShape = ShapeUtils::composeShapeUsingDimsAndIdx({bS, oC, trueoH, trueoW, 0, indIOioC, indOoH, indOoH + 1}); std::vector<sd::LongType> expectedWeightsShape = ConvolutionUtils::expectWeightsShape(wFormat, kH, kW, iC, mC); REQUIRE_TRUE(gradO->isSameShape(expectedGradOShape), 0, "DEPTHWISECONV2D_BP CUDNN OP: wrong shape of output gradients (next epsilon) array, expected is %s, but " "got %s instead !", ShapeUtils::shapeAsString(expectedGradOShape).c_str(), ShapeUtils::shapeAsString(gradO).c_str()); REQUIRE_TRUE(weights->isSameShape(expectedWeightsShape), 0, "DEPTHWISECONV2D_BP CUDNN OP: wrong shape of weights array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expectedWeightsShape).c_str(), ShapeUtils::shapeAsString(weights).c_str()); if (bias) REQUIRE_TRUE(bias->rankOf() <= 2 && oC == bias->lengthOf(), 0, "DEPTHWISECONV2D_BP CUDNN OP: wrong shape of array with biases, expected rank, length: <=2, %i, but " "got %i, %i instead !", oC, bias->rankOf(), bias->lengthOf()); std::vector<int> wPermut, gradWPermut; // cudnn support format {oC, iC/groupCount, kH, kW} only, mC = 1, oC = iC // (groupCount == iC) that is {iC, mC, kH, kW} if (0 == wFormat) { wPermut = {2, 3, 0, 1}; // kH, kW, iC, mC -> iC, mC, kH, kW gradWPermut = {2, 3, 0, 1}; // iC, mC, kH, kW -> kH, kW, iC, mC } else if (1 == wFormat) { wPermut = {1, 0, 2, 3}; // mC, iC, kH, kW -> iC, mC, kH, kW gradWPermut = {1, 0, 2, 3}; // iC, mC, kH, kW -> mC, iC, kH, kW } else { wPermut = {3, 0, 1, 2}; // mC, kH, kW, iC -> iC, mC, kH, kW gradWPermut = {1, 2, 3, 0}; // iC, mC, kH, kW -> mC, kH, kW, iC } std::unique_ptr<NDArray> tmpGradI = {}, tmpInput = {}; std::unique_ptr<NDArray> uNewGradW( new NDArray(gradW->ordering(), {iC, mC, kH, kW}, gradW->dataType(), gradW->getContext())); std::unique_ptr<NDArray> uNewWeights( new NDArray(weights->ordering(), {iC, mC, kH, kW}, weights->dataType(), weights->getContext())); uNewWeights->assign(weights->permute(wPermut)); NDArray* newInput = input; NDArray* newGradI = gradI; if (paddingMode == 1) { // in same paddingMode cudnn doesn't support asymmetric left/right top/bottopm paddings auto ret = checkConv2dCUDNNPadAsymmetric(input, gradI, iH, iW, oH, oW, kH, kW, sH, sW, pH, pW, dH, dW, isNCHW); tmpInput = std::move(std::get<0>(ret)); tmpGradI = std::move(std::get<1>(ret)); if (tmpInput) newInput = tmpInput.get(); if (tmpGradI) newGradI = tmpGradI.get(); } depthwiseConv2dBpCUDNN(block.launchContext(), newInput, uNewWeights.get(), gradO, newGradI, uNewGradW.get(), gradB, kH, kW, sH, sW, pH, pW, dH, dW, paddingMode, isNCHW); uNewGradW->permutei(gradWPermut); gradW->assign(uNewGradW.get()); if (newInput != input) { if (isNCHW) gradI->assign((*newGradI)({0, 0, 0, 0, 0, gradI->sizeAt(2), 0, 
gradI->sizeAt(3)})); else gradI->assign((*newGradI)({0, 0, 0, gradI->sizeAt(1), 0, gradI->sizeAt(2), 0, 0})); } return sd::Status::OK; } PLATFORM_CHECK(depthwise_conv2d_bp, ENGINE_CUDA) { auto input = INPUT_VARIABLE(0); // [bS, iH, iW, iC] (NDHWC) or [bS, iC, iH, iW] (NCDHW) auto weights = INPUT_VARIABLE(1); // [kH, kW, iC, mC], [mC, iC, kH, kW], [mC, kH, kW, iC] auto bias = block.width() > 3 ? INPUT_VARIABLE(2) : nullptr; // [oC] = [iC*mC] auto gradO = block.width() > 3 ? INPUT_VARIABLE(3) : INPUT_VARIABLE(2); // [bS, oH, oW, oC] (NDHWC) or [bS, oC, oH, oW] (NCDHW), epsilon_next const int paddingMode = INT_ARG(8); // 0-VALID, 1-SAME, 2-CAUSAL const int isNCHW = block.getIArguments()->size() > 9 ? !INT_ARG(9) : 1; // INT_ARG(9): 0-NCHW, 1-NHWC const int wFormat = block.getIArguments()->size() > 10 ? INT_ARG(10) : 0; // 0 - [kH, kW, iC, mC], 1 - [mC, iC, kH, kW], 2 - [mC, kH, kW, iC] Requirements req("CUDNN DEPTHWISE_CONV2d_BP OP"); const auto inType = input->dataType(); const auto wType = weights->dataType(); const auto gType = gradO->dataType(); req.expectNotEq(makeInfoVariable(paddingMode, "paddingMode"), 2) && req.expectTrue(makeInfoVariable(isNCHW, "isNCHW")) && req.expectEq(makeInfoVariable(weights->sizeAt(0 == wFormat ? 3 : 0), "weights#mC"), 1) && req.expectIn(makeInfoVariable(input->dataType(), TYPE_MSG_INPUT0), {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}) && req.expectIn(makeInfoVariable(weights->dataType(), TYPE_MSG_INPUT1), {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}); if (bias) { req.expectIn(makeInfoVariable(bias->dataType(), TYPE_MSG_INPUT_ "#bias"), {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}) && req.expectIn(makeInfoVariable(gradO->dataType(), TYPE_MSG_INPUT3), {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}); } else { req.expectIn(makeInfoVariable(gradO->dataType(), TYPE_MSG_INPUT2), {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}); } req.logTheSuccess(); return req; } } // namespace platforms } // namespace ops } // namespace sd
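// Illustrative sketch, not part of the file above: the cuDNN path always consumes weights in
// [iC, mC, kH, kW] order (depthwise mode with groupCount == iC and mC == 1), so the layout
// selected by wFormat is permuted first. This mirrors the wPermut choices in the code above;
// weightPermutForCudnn is a placeholder name.
#include <vector>

std::vector<int> weightPermutForCudnn(int wFormat) {
  if (wFormat == 0) return {2, 3, 0, 1};  // [kH, kW, iC, mC] -> [iC, mC, kH, kW]
  if (wFormat == 1) return {1, 0, 2, 3};  // [mC, iC, kH, kW] -> [iC, mC, kH, kW]
  return {3, 0, 1, 2};                    // [mC, kH, kW, iC] -> [iC, mC, kH, kW]
}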
75e820f51a7a858f2b3e3cf40753aac1b4c60c2c.cu
/* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma ([email protected]) // #include <ops/declarable/helpers/convolutions.h> #include "cudnnUtils.h" namespace sd { namespace ops { namespace platforms { ////////////////////////////////////////////////////////////////////////// static void depthwiseConv2dCUDNN(const LaunchContext* context, const NDArray* input, const NDArray* weights, const NDArray* bias, NDArray* output, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int paddingMode, const bool isNCHW) { // cudnn supports only following case: mC = 1, oC = iC (groupCount == iC) // input [bS, iC, iH, iW] nchw or [bS, iH, iW, iC] nhwc // weights [iC, mC, kH, kW] // bias [oC], may be nullptr // output [bS, oC, oH, oW] nchw or [bS, oH, oW, oC] nhwc // oC = iC*mC int bS, iC, iH, iW, mC, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width; int indIOioC, indIiH, indWmC, indWiC, indWkH, indOoH; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, 0, *input, *output, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWmC, indWkH, indOoH); mC = weights->sizeAt(1); auto handle = reinterpret_cast<cudnnHandle_t*>(context->getCuDnnHandle()); CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnSetStream), cudnnSetStream(*handle, *context->getCudaStream())); cudnnTensorFormat_t format = isNCHW ? 
CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC; PointersManager manager(context, __func__); // input descriptor CudnnTensor x; if (input->ews() == 1 && input->ordering() == 'c') x.set4D(format, cudnnDataType(input->dataType()), bS, iC, iH, iW); else x.set4DEx(cudnnDataType(input->dataType()), bS, iC, iH, iW, input->strideAt(0), input->strideAt(indIOioC), input->strideAt(indIiH), input->strideAt(indIiH + 1)); // weights descriptor FilterDesc w; w.set4D(cudnnDataType(weights->dataType()), CUDNN_TENSOR_NCHW, iC, mC, kH, kW); // output descriptor CudnnTensor z; if (output->ews() == 1 && output->ordering() == 'c') z.set4D(format, cudnnDataType(output->dataType()), bS, oC, oH, oW); else z.set4DEx(cudnnDataType(output->dataType()), bS, oC, oH, oW, output->strideAt(0), output->strideAt(indIOioC), output->strideAt(indOoH), output->strideAt(indOoH + 1)); // description of convolution ConvolutionDesc conv; conv.set2D(pH, pW, sH, sW, dH, dW, CUDNN_CROSS_CORRELATION, cudnnDataType(output->dataType())); CHECK_CUDNN_FAILURE_MSG( STRINGIZE(cudnnSetConvolutionGroupCount), cudnnSetConvolutionGroupCount( conv, iC)); // set number of groups (depthwise mode) in description of convolution, groupCount == iC // algorithm description cudnnConvolutionFwdAlgo_t algo; cudnnConvolutionFwdAlgoPerf_t algoPerf; int count = 0; // CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnGetConvolutionForwardAlgorithm), cudnnGetConvolutionForwardAlgorithm( // *handle, x, w, conv, z, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnFindConvolutionForwardAlgorithm), cudnnFindConvolutionForwardAlgorithm(*handle, x, w, conv, z, 1, &count, &algoPerf)); if (count == 0) throw sd::cuda_exception::build("depthwiseConv2dCUDNN: cudnnGetConvolutionForwardAlgorithm failed", 0); algo = algoPerf.algo; // allocate auxiliary device memory, abbreviation ws means workspace size_t wsSize; CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnGetConvolutionForwardWorkspaceSize), cudnnGetConvolutionForwardWorkspaceSize(*handle, x, w, conv, z, algo, &wsSize)); void* wsData = manager.allocateDevMem(wsSize); // provide scaling parameters const float alpha32(1), beta32(0); const double alpha64(1), beta64(0); const void* alpha = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64); const void* beta = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64); NDArray::prepareSpecialUse({output}, {input, weights, bias}); // run calculation CHECK_CUDNN_FAILURE_MSG( STRINGIZE(cudnnConvolutionForward), cudnnConvolutionForward(*handle, alpha, x, input->specialBuffer(), w, weights->specialBuffer(), conv, algo, wsData, wsSize, beta, z, output->specialBuffer())); // add bias if it is present if (bias != nullptr) { CudnnTensor b; // b.set( format, cudnnDataType(bias->dataType()), 1, isNCHW ? bias->lengthOf() : 1, 1, isNCHW ? 
1: // bias->lengthOf()); b.set4D(CUDNN_TENSOR_NCHW, cudnnDataType(bias->dataType()), 1, oC, 1, 1); CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnAddTensor), cudnnAddTensor(*handle, alpha, b, bias->specialBuffer(), alpha, z, output->specialBuffer())); } // cudaErr = cudaStreamSynchronize(*context->getCudaStream()); // if (cudaErr != 0) // throw cuda_exception::build("depthwiseConv2dCUDNN: cudaStreamSynchronize failed !", cudaErr); } ////////////////////////////////////////////////////////////////////////// static void depthwiseConv2dBpCUDNN(const LaunchContext* context, const NDArray* input, const NDArray* weights, const NDArray* gradO, NDArray* gradI, NDArray* gradW, NDArray* gradB, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int paddingMode, const bool isNCHW) { // cudnn supports only following case: mC = 1, oC = iC (groupCount == iC) // input, gradI [bS, iC, iH, iW] nchw or [bS, iH, iW, iC] nhwc // weights, gradW [iC, mC, kH, kW] // gradB [oC], may be nullptr // gradO [bS, oC, oH, oW] nchw or [bS, oH, oW, oC] nhwc // oC = iC*mC int bS, iC, iH, iW, mC, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width; int indIOioC, indIiH, indWmC, indWiC, indWkH, indOoH; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, 0, *input, *gradO, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWmC, indWkH, indOoH); mC = weights->sizeAt(1); auto handle = reinterpret_cast<cudnnHandle_t*>(context->getCuDnnHandle()); CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnSetStream), cudnnSetStream(*handle, *context->getCudaStream())); cudnnTensorFormat_t format = isNCHW ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC; PointersManager manager(context, __func__); // input descriptor CudnnTensor x; if (input->ews() == 1 && input->ordering() == 'c') x.set4D(format, cudnnDataType(input->dataType()), bS, iC, iH, iW); else x.set4DEx(cudnnDataType(input->dataType()), bS, iC, iH, iW, input->strideAt(0), input->strideAt(indIOioC), input->strideAt(indIiH), input->strideAt(indIiH + 1)); // gradO descriptor CudnnTensor dz; if (gradO->ews() == 1 && gradO->ordering() == 'c') dz.set4D(format, cudnnDataType(gradO->dataType()), bS, oC, oH, oW); else dz.set4DEx(cudnnDataType(gradO->dataType()), bS, oC, oH, oW, gradO->strideAt(0), gradO->strideAt(indIOioC), gradO->strideAt(indOoH), gradO->strideAt(indOoH + 1)); // gradI descriptor CudnnTensor dx; if (gradI->ews() == 1 && gradI->ordering() == 'c') dx.set4D(format, cudnnDataType(gradI->dataType()), bS, iC, iH, iW); else dx.set4DEx(cudnnDataType(gradI->dataType()), bS, iC, iH, iW, gradI->strideAt(0), gradI->strideAt(indIOioC), gradI->strideAt(indIiH), gradI->strideAt(indIiH + 1)); // gradW descriptor FilterDesc dw; dw.set4D(cudnnDataType(gradW->dataType()), CUDNN_TENSOR_NCHW, iC, mC, kH, kW); // description of convolution ConvolutionDesc conv; conv.set2D(pH, pW, sH, sW, dH, dW, CUDNN_CROSS_CORRELATION, cudnnDataType(gradO->dataType())); CHECK_CUDNN_FAILURE_MSG( STRINGIZE(cudnnSetConvolutionGroupCount), cudnnSetConvolutionGroupCount( conv, iC)); // set number of groups (depthwise mode) in description of convolution, groupCount == iC // gradW algorithm description cudnnConvolutionBwdFilterAlgo_t algoGradW; cudnnConvolutionBwdFilterAlgoPerf_t algoGradWPerf; int count = 0; // CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnGetConvolutionBackwardFilterAlgorithm), // cudnnGetConvolutionBackwardFilterAlgorithm( *handle, x, dz, conv, dw, 
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, // 0, &algoGradW)); CHECK_CUDNN_FAILURE_MSG( STRINGIZE(cudnnFindConvolutionBackwardFilterAlgorithm), cudnnFindConvolutionBackwardFilterAlgorithm(*handle, x, dz, conv, dw, 1, &count, &algoGradWPerf)); if (count == 0) throw sd::cuda_exception::build( "depthwiseConv2dBpCUDNN: cudnnGetConvolutionBackwardFilterAlgorithm failed as the count is 0 ", 0); algoGradW = algoGradWPerf.algo; // gradI algorithm description cudnnConvolutionBwdDataAlgo_t algoGradI; cudnnConvolutionBwdDataAlgoPerf_t algoGradIPerf; // CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnGetConvolutionBackwardDataAlgorithm), // cudnnGetConvolutionBackwardDataAlgorithm( *handle, dw, dz, conv, x, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, // &algoGradI)); CHECK_CUDNN_FAILURE_MSG( STRINGIZE(cudnnFindConvolutionBackwardDataAlgorithm), cudnnFindConvolutionBackwardDataAlgorithm(*handle, dw, dz, conv, x, 1, &count, &algoGradIPerf)); if (count == 0) throw sd::cuda_exception::build( "depthwiseConv2dBpCUDNN: cudnnGetConvolutionBackwardDataAlgorithm failed as the count is 0 ", 0); algoGradI = algoGradIPerf.algo; // allocate auxiliary device memory for gradW calculation, abbreviation ws means workspace size_t wsGradWSize; CHECK_CUDNN_FAILURE_MSG( STRINGIZE(cudnnGetConvolutionBackwardFilterWorkspaceSize), cudnnGetConvolutionBackwardFilterWorkspaceSize(*handle, x, dz, conv, dw, algoGradW, &wsGradWSize)); void* wsGradWData = manager.allocateDevMem(wsGradWSize); // allocate auxiliary device memory for gradI calculation, abbreviation ws means workspace size_t wsGradISize; CHECK_CUDNN_FAILURE_MSG( STRINGIZE(cudnnGetConvolutionBackwardDataWorkspaceSize), cudnnGetConvolutionBackwardDataWorkspaceSize(*handle, dw, dz, conv, dx, algoGradI, &wsGradISize)); void* wsGradIData = manager.allocateDevMem(wsGradISize); // provide scaling parameters const float alpha32(1), beta32(0); const double alpha64(1), beta64(0); const void* alpha = gradO->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64); const void* beta = gradO->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64); NDArray::prepareSpecialUse({gradI, gradW, gradB}, {input, weights, gradO}); // run calculation for gradB (if not nullptr) if (gradB != nullptr) { CudnnTensor db; // db.set( format, cudnnDataType(gradB->dataType()), 1, isNCHW ? gradB->lengthOf() : 1, 1, isNCHW ? 
1: // gradB->lengthOf()); db.set4D(CUDNN_TENSOR_NCHW, cudnnDataType(gradB->dataType()), 1, oC, 1, 1); CHECK_CUDNN_FAILURE_MSG( STRINGIZE(cudnnConvolutionBackwardBias), cudnnConvolutionBackwardBias(*handle, alpha, dz, gradO->specialBuffer(), beta, db, gradB->specialBuffer())); } // run calculation for gradW CHECK_CUDNN_FAILURE_MSG( STRINGIZE(cudnnConvolutionBackwardFilter), cudnnConvolutionBackwardFilter(*handle, alpha, x, input->specialBuffer(), dz, gradO->specialBuffer(), conv, algoGradW, wsGradWData, wsGradWSize, beta, dw, gradW->specialBuffer())); // run calculation for gradI CHECK_CUDNN_FAILURE_MSG( STRINGIZE(cudnnConvolutionBackwardData), cudnnConvolutionBackwardData(*handle, alpha, dw, weights->specialBuffer(), dz, gradO->specialBuffer(), conv, algoGradI, wsGradIData, wsGradISize, beta, dx, gradI->specialBuffer())); // cudaErr = cudaStreamSynchronize(*context->getCudaStream()); // if (cudaErr != 0) // throw cuda_exception::build("depthwiseConv2dBpCUDNN: cudaStreamSynchronize failed !", cudaErr); NDArray::registerSpecialUse({gradI, gradW, gradB}, {input, weights, gradO}); } ////////////////////////////////////////////////////////////////////////// PLATFORM_IMPL(depthwise_conv2d, ENGINE_CUDA) { auto input = INPUT_VARIABLE(0); // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW) auto weights = INPUT_VARIABLE(1); // [kH, kW, iC, mC], [mC, iC, kH, kW], [mC, kH, kW, iC] auto bias = block.width() > 2 ? INPUT_VARIABLE(2) : nullptr; // [oC] = iC*mC auto output = OUTPUT_VARIABLE(0); // [bS, oH, oW, iC*mC] (NHWC) or [bS, iC*mC, oH, oW] (NCHW) REQUIRE_TRUE(input->rankOf() == 4, 0, "DEPTHWISECONV2D CUDNN OP: rank of input array must be equal to 4, but got %i instead !", input->rankOf()); REQUIRE_TRUE(weights->rankOf() == 4, 0, "DEPTHWISECONV2D CUDNN OP: rank of weights array must be equal to 4, but got %i instead !", weights->rankOf()); int kH = INT_ARG(0) > 0 ? INT_ARG(0) : static_cast<int>(weights->sizeAt(0)); // filter(kernel) height int kW = INT_ARG(1) > 0 ? INT_ARG(1) : static_cast<int>(weights->sizeAt(1)); // filter(kernel) width int sH = INT_ARG(2); // strides height int sW = INT_ARG(3); // strides width int pH = INT_ARG(4); // paddings height int pW = INT_ARG(5); // paddings width int dH = INT_ARG(6); // dilations height int dW = INT_ARG(7); // dilations width int paddingMode = INT_ARG(8); // 0-VALID, 1-SAME int isNCHW = block.getIArguments()->size() > 9 ? !INT_ARG(9) : 1; // INT_ARG(9): 0-NCHW, 1-NHWC int wFormat = block.getIArguments()->size() > 10 ? 
INT_ARG(10) : 0; // 0 - [kH, kW, iC, mC], 1 - [mC, iC, kH, kW], 2 - [mC, kH, kW, iC] int bS, iC, iH, iW, mC, oC, oH, oW; // batch size, input channels, input height/width, channels multiplier(oC = // iC*mC), output channels, output height/width int indIOioC, indIiH, indWmC, indWiC, indWkH, indOoH; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, wFormat, *input, *output, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWmC, indWkH, indOoH); mC = weights->sizeAt(indWmC); // channels multiplier ConvolutionUtils::calcPadding2D(pH, pW, oH, oW, iH, iW, kH, kW, sH, sW, dH, dW, paddingMode); std::vector<sd::LongType> expectedWeightsShape = ConvolutionUtils::expectWeightsShape(wFormat, kH, kW, iC, mC); REQUIRE_TRUE(weights->isSameShape(expectedWeightsShape), 0, "DEPTHWISECONV2D CUDNN OP: wrong shape of weights array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expectedWeightsShape).c_str(), ShapeUtils::shapeAsString(weights).c_str()); REQUIRE_TRUE( output->sizeAt(indIOioC) == iC * mC, 0, "DEPTHWISECONV2D CUDNN OP: the output_channels must be equal to input_channels * channels_multiplier = %i !", iC * mC); if (bias) REQUIRE_TRUE(bias->rankOf() <= 2 && oC == bias->lengthOf(), 0, "DEPTHWISECONV2D CUDNN OP: wrong shape of array with biases, expected rank, length: <=2, %i, but got " "%i, %i instead !", oC, bias->rankOf(), bias->lengthOf()); std::vector<int> wPermut; // cudnn support format {oC, iC/groupCount, kH, kW} only, mC = 1, oC = iC (groupCount == // iC) that is {iC, mC, kH, kW} in our case if (0 == wFormat) wPermut = {2, 3, 0, 1}; // kH, kW, iC, mC -> iC, mC, kH, kW else if (1 == wFormat) wPermut = {1, 0, 2, 3}; // mC, iC, kH, kW -> iC, mC, kH, kW else wPermut = {3, 0, 1, 2}; // mC, kH, kW, iC -> iC, mC, kH, kW std::unique_ptr<NDArray> uNewWeights( new NDArray(weights->ordering(), {iC, mC, kH, kW}, weights->dataType(), weights->getContext())); uNewWeights->assign(weights->permute(wPermut)); std::unique_ptr<NDArray> tmpInput = {}; if (paddingMode == 1) { // in same paddingMode cudnn doesn't support asymmetric left/right top/bottopm paddings auto ret = checkConv2dCUDNNPadAsymmetric(input, nullptr, iH, iW, oH, oW, kH, kW, sH, sW, pH, pW, dH, dW, isNCHW); tmpInput = std::move(std::get<0>(ret)); if (tmpInput) input = tmpInput.get(); } depthwiseConv2dCUDNN(block.launchContext(), input, uNewWeights.get(), bias, output, kH, kW, sH, sW, pH, pW, dH, dW, paddingMode, isNCHW); return sd::Status::OK; } ////////////////////////////////////////////////////////////////////////// PLATFORM_CHECK(depthwise_conv2d, ENGINE_CUDA) { auto input = INPUT_VARIABLE(0); // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW) auto weights = INPUT_VARIABLE(1); // [kH, kW, iC, mC], [mC, iC, kH, kW], [mC, kH, kW, iC] auto bias = block.width() > 2 ? INPUT_VARIABLE(2) : nullptr; // [oC] = iC*mC const int paddingMode = INT_ARG(8); // 0-VALID, 1-SAME, 2-CAUSAL const int wFormat = block.getIArguments()->size() > 10 ? INT_ARG(10) : 0; // 0 - [kH, kW, iC, mC], 1 - [mC, iC, kH, kW], 2 - [mC, kH, kW, iC] Requirements req("CUDNN DEPTHWISE_CONV2d OP"); req.expectNotEq(makeInfoVariable(paddingMode, "paddingMode"), 2) && req.expectEq(makeInfoVariable(weights->sizeAt(0 == wFormat ? 
3 : 0), "weights#mC"), 1) && req.expectIn(makeInfoVariable(input->dataType(), TYPE_MSG_INPUT0), {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}) && req.expectIn(makeInfoVariable(weights->dataType(), TYPE_MSG_INPUT1), {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}); if (bias) { req.expectIn(makeInfoVariable(bias->dataType(), TYPE_MSG_INPUT_ "#bias"), {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}); } req.logTheSuccess(); return req; } ////////////////////////////////////////////////////////////////////////// PLATFORM_IMPL(depthwise_conv2d_bp, ENGINE_CUDA) { auto input = INPUT_VARIABLE(0); // [bS, iH, iW, iC] (NDHWC) or [bS, iC, iH, iW] (NCDHW) auto weights = INPUT_VARIABLE(1); // [kH, kW, iC, mC], [mC, iC, kH, kW], [mC, kH, kW, iC] auto bias = block.width() > 3 ? INPUT_VARIABLE(2) : nullptr; // [oC] = [iC*mC] auto gradO = block.width() > 3 ? INPUT_VARIABLE(3) : INPUT_VARIABLE(2); // [bS, oH, oW, oC] (NDHWC) or [bS, oC, oH, oW] (NCDHW), epsilon_next auto gradI = OUTPUT_VARIABLE(0); // [bS, iH, iW, iC] (NDHWC) or [bS, iC, iH, iW] (NCDHW), epsilon auto gradW = OUTPUT_VARIABLE(1); // [kH, kW, iC, mC], [mC, iC, kH, kW], [mC, kH, kW, iC] auto gradB = block.width() > 3 ? OUTPUT_VARIABLE(2) : nullptr; // [oC] REQUIRE_TRUE(input->rankOf() == 4, 0, "DEPTHWISECONV2D_BP CUDNN OP: rank of input array must be equal to 4, but got %i instead !", input->rankOf()); REQUIRE_TRUE(weights->rankOf() == 4, 0, "DEPTHWISECONV2D_BP CUDNN OP: rank of weights array must be equal to 4, but got %i instead !", weights->rankOf()); REQUIRE_TRUE(gradO->rankOf() == 4, 0, "DEPTHWISECONV2D_BP CUDNN OP: rank of output gradients (next epsilon) array must be equal to 4, but got " "%i instead !", gradO->rankOf()); int kH = INT_ARG(0) > 0 ? INT_ARG(0) : static_cast<int>(weights->sizeAt(0)); // filter(kernel) height int kW = INT_ARG(1) > 0 ? INT_ARG(1) : static_cast<int>(weights->sizeAt(1)); // filter(kernel) width int sH = INT_ARG(2); // strides height int sW = INT_ARG(3); // strides width int pH = INT_ARG(4); // paddings height int pW = INT_ARG(5); // paddings width int dH = INT_ARG(6); // dilations height int dW = INT_ARG(7); // dilations width int paddingMode = INT_ARG(8); // 0-VALID, 1-SAME int isNCHW = block.getIArguments()->size() > 9 ? !INT_ARG(9) : 1; // INT_ARG(9): 1-NHWC, 0-NCHW int wFormat = block.getIArguments()->size() > 10 ? 
INT_ARG(10) : 0; // 0 - [kH, kW, iC, mC], 1 - [mC, iC, kH, kW], 2 - [mC, kH, kW, iC] int bS, iC, iH, iW, mC, oC, oH, oW; // batch size, input channels, input height/width, channels multiplier(oC = // iC*mC), output channels, output height/width int indIOioC, indIiH, indWmC, indWiC, indWkH, indOoH; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, wFormat, *input, *gradO, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWmC, indWkH, indOoH); mC = weights->sizeAt(indWmC); // channels multiplier int trueoH, trueoW; // correct output height, width ConvolutionUtils::calcOutSizePool2D(trueoH, trueoW, kH, kW, sH, sW, pH, pW, dH, dW, iH, iW, paddingMode); ConvolutionUtils::calcPadding2D(pH, pW, oH, oW, iH, iW, kH, kW, sH, sW, dH, dW, paddingMode); std::vector<sd::LongType> expectedGradOShape = ShapeUtils::composeShapeUsingDimsAndIdx({bS, oC, trueoH, trueoW, 0, indIOioC, indOoH, indOoH + 1}); std::vector<sd::LongType> expectedWeightsShape = ConvolutionUtils::expectWeightsShape(wFormat, kH, kW, iC, mC); REQUIRE_TRUE(gradO->isSameShape(expectedGradOShape), 0, "DEPTHWISECONV2D_BP CUDNN OP: wrong shape of output gradients (next epsilon) array, expected is %s, but " "got %s instead !", ShapeUtils::shapeAsString(expectedGradOShape).c_str(), ShapeUtils::shapeAsString(gradO).c_str()); REQUIRE_TRUE(weights->isSameShape(expectedWeightsShape), 0, "DEPTHWISECONV2D_BP CUDNN OP: wrong shape of weights array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expectedWeightsShape).c_str(), ShapeUtils::shapeAsString(weights).c_str()); if (bias) REQUIRE_TRUE(bias->rankOf() <= 2 && oC == bias->lengthOf(), 0, "DEPTHWISECONV2D_BP CUDNN OP: wrong shape of array with biases, expected rank, length: <=2, %i, but " "got %i, %i instead !", oC, bias->rankOf(), bias->lengthOf()); std::vector<int> wPermut, gradWPermut; // cudnn support format {oC, iC/groupCount, kH, kW} only, mC = 1, oC = iC // (groupCount == iC) that is {iC, mC, kH, kW} if (0 == wFormat) { wPermut = {2, 3, 0, 1}; // kH, kW, iC, mC -> iC, mC, kH, kW gradWPermut = {2, 3, 0, 1}; // iC, mC, kH, kW -> kH, kW, iC, mC } else if (1 == wFormat) { wPermut = {1, 0, 2, 3}; // mC, iC, kH, kW -> iC, mC, kH, kW gradWPermut = {1, 0, 2, 3}; // iC, mC, kH, kW -> mC, iC, kH, kW } else { wPermut = {3, 0, 1, 2}; // mC, kH, kW, iC -> iC, mC, kH, kW gradWPermut = {1, 2, 3, 0}; // iC, mC, kH, kW -> mC, kH, kW, iC } std::unique_ptr<NDArray> tmpGradI = {}, tmpInput = {}; std::unique_ptr<NDArray> uNewGradW( new NDArray(gradW->ordering(), {iC, mC, kH, kW}, gradW->dataType(), gradW->getContext())); std::unique_ptr<NDArray> uNewWeights( new NDArray(weights->ordering(), {iC, mC, kH, kW}, weights->dataType(), weights->getContext())); uNewWeights->assign(weights->permute(wPermut)); NDArray* newInput = input; NDArray* newGradI = gradI; if (paddingMode == 1) { // in same paddingMode cudnn doesn't support asymmetric left/right top/bottopm paddings auto ret = checkConv2dCUDNNPadAsymmetric(input, gradI, iH, iW, oH, oW, kH, kW, sH, sW, pH, pW, dH, dW, isNCHW); tmpInput = std::move(std::get<0>(ret)); tmpGradI = std::move(std::get<1>(ret)); if (tmpInput) newInput = tmpInput.get(); if (tmpGradI) newGradI = tmpGradI.get(); } depthwiseConv2dBpCUDNN(block.launchContext(), newInput, uNewWeights.get(), gradO, newGradI, uNewGradW.get(), gradB, kH, kW, sH, sW, pH, pW, dH, dW, paddingMode, isNCHW); uNewGradW->permutei(gradWPermut); gradW->assign(uNewGradW.get()); if (newInput != input) { if (isNCHW) gradI->assign((*newGradI)({0, 0, 0, 0, 0, gradI->sizeAt(2), 0, 
gradI->sizeAt(3)})); else gradI->assign((*newGradI)({0, 0, 0, gradI->sizeAt(1), 0, gradI->sizeAt(2), 0, 0})); } return sd::Status::OK; } PLATFORM_CHECK(depthwise_conv2d_bp, ENGINE_CUDA) { auto input = INPUT_VARIABLE(0); // [bS, iH, iW, iC] (NDHWC) or [bS, iC, iH, iW] (NCDHW) auto weights = INPUT_VARIABLE(1); // [kH, kW, iC, mC], [mC, iC, kH, kW], [mC, kH, kW, iC] auto bias = block.width() > 3 ? INPUT_VARIABLE(2) : nullptr; // [oC] = [iC*mC] auto gradO = block.width() > 3 ? INPUT_VARIABLE(3) : INPUT_VARIABLE(2); // [bS, oH, oW, oC] (NDHWC) or [bS, oC, oH, oW] (NCDHW), epsilon_next const int paddingMode = INT_ARG(8); // 0-VALID, 1-SAME, 2-CAUSAL const int isNCHW = block.getIArguments()->size() > 9 ? !INT_ARG(9) : 1; // INT_ARG(9): 0-NCHW, 1-NHWC const int wFormat = block.getIArguments()->size() > 10 ? INT_ARG(10) : 0; // 0 - [kH, kW, iC, mC], 1 - [mC, iC, kH, kW], 2 - [mC, kH, kW, iC] Requirements req("CUDNN DEPTHWISE_CONV2d_BP OP"); const auto inType = input->dataType(); const auto wType = weights->dataType(); const auto gType = gradO->dataType(); req.expectNotEq(makeInfoVariable(paddingMode, "paddingMode"), 2) && req.expectTrue(makeInfoVariable(isNCHW, "isNCHW")) && req.expectEq(makeInfoVariable(weights->sizeAt(0 == wFormat ? 3 : 0), "weights#mC"), 1) && req.expectIn(makeInfoVariable(input->dataType(), TYPE_MSG_INPUT0), {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}) && req.expectIn(makeInfoVariable(weights->dataType(), TYPE_MSG_INPUT1), {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}); if (bias) { req.expectIn(makeInfoVariable(bias->dataType(), TYPE_MSG_INPUT_ "#bias"), {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}) && req.expectIn(makeInfoVariable(gradO->dataType(), TYPE_MSG_INPUT3), {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}); } else { req.expectIn(makeInfoVariable(gradO->dataType(), TYPE_MSG_INPUT2), {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}); } req.logTheSuccess(); return req; } } // namespace platforms } // namespace ops } // namespace sd
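Both the forward and the backward path above rely on cuDNN's grouped convolution: with groupCount == iC and a [iC, 1, kH, kW] filter descriptor, an ordinary convolution call behaves as a depthwise convolution (mC == 1). The sketch below isolates just that descriptor setup; it is a minimal sketch, not part of the original file, assuming cuDNN 7 or later, float data and NCHW, with error checking omitted for brevity.

#include <cudnn.h>

// Hypothetical helper: filter/convolution descriptors for a depthwise convolution
// with iC channels, channel multiplier 1 and a kH x kW kernel.
void makeDepthwiseDescriptors(cudnnFilterDescriptor_t* wDesc, cudnnConvolutionDescriptor_t* convDesc,
                              int iC, int kH, int kW,
                              int pH, int pW, int sH, int sW, int dH, int dW) {
    cudnnCreateFilterDescriptor(wDesc);
    // cuDNN expects [oC, iC/groupCount, kH, kW]; for depthwise oC == iC and iC/groupCount == mC == 1.
    cudnnSetFilter4dDescriptor(*wDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, iC, 1, kH, kW);

    cudnnCreateConvolutionDescriptor(convDesc);
    cudnnSetConvolution2dDescriptor(*convDesc, pH, pW, sH, sW, dH, dW,
                                    CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT);
    cudnnSetConvolutionGroupCount(*convDesc, iC);  // groupCount == iC turns the convolution depthwise
}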
26c41abbc84e89c6a6f7d90469e91f76fdb290bc.hip
// !!! This is a file automatically generated by hipify!!! #include "../common/common.h" #include <hip/hip_runtime.h> #include <stdio.h> int recursiveReduceWithStrideOnCPU(int *data, int const size) { if (size == 1) return data[0]; int const stride = size / 2; for (int i = 0; i < stride; i++) { data[i] += data[i + stride]; } return recursiveReduceWithStrideOnCPU(data, stride); } __global__ void reduceNeighbored (int *g_idata, int *g_odata, unsigned int n) { unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; int *idata = g_idata + blockIdx.x * blockDim.x; if (idx >= n) return; for (int stride = 1; stride < blockDim.x; stride *= 2) { if ((tid % (2 * stride)) == 0) { idata[tid] += idata[tid + stride]; } __syncthreads(); } if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceneighboredwithoutIF (int *g_idata, int *g_odata, unsigned int n) { unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; int *idata = g_idata + blockIdx.x * blockDim.x; if(idx >= n) return; for (int stride = 1; stride < blockDim.x; stride *= 2) { int index = 2 * stride * tid; if (index < blockDim.x) { idata[index] += idata[index + stride]; } __syncthreads(); } if (tid == 0) g_odata[blockIdx.x] = idata[0]; } int main(int argc, char **argv) { int dev = 0; hipDeviceProp_t deviceProp; CHECK(hipGetDeviceProperties(&deviceProp, dev)); printf("%s starting reduction at ", argv[0]); printf("device %d: %s ", dev, deviceProp.name); CHECK(hipSetDevice(dev)); bool bResult = false; int size = 1 << 24; printf(" with array size %d ", size); // execution configuration int blocksize = 512; if(argc > 1) { blocksize = atoi(argv[1]); } dim3 block (blocksize, 1); dim3 grid ((size + block.x - 1) / block.x, 1); printf("grid %d block %d\n", grid.x, block.x); size_t bytes = size * sizeof(int); int *h_idata = (int *) malloc(bytes); int *h_odata = (int *) malloc(grid.x * sizeof(int)); int *tmp = (int *) malloc(bytes); for (int i = 0; i < size; i++) { h_idata[i] = (int)( rand() & 0xFF ); } memcpy (tmp, h_idata, bytes); double iStart, iElaps; int gpu_sum = 0; int *d_idata = NULL; int *d_odata = NULL; CHECK(hipMalloc((void **) &d_idata, bytes)); CHECK(hipMalloc((void **) &d_odata, grid.x * sizeof(int))); iStart = seconds(); int cpu_sum = recursiveReduceWithStrideOnCPU (tmp, size); iElaps = seconds() - iStart; printf("cpu reduce elapsed %f sec cpu_sum: %d\n", iElaps, cpu_sum); // kernel 1: reduceNeighbored CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); CHECK(hipDeviceSynchronize()); iStart = seconds(); hipLaunchKernelGGL(( reduceNeighbored), dim3(grid), dim3(block), 0, 0, d_idata, d_odata, size); CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i]; printf("gpu with IF elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x, block.x); CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); CHECK(hipDeviceSynchronize()); iStart = seconds(); hipLaunchKernelGGL(( reduceneighboredwithoutIF), dim3(grid), dim3(block), 0, 0, d_idata, d_odata, size); CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i]; printf("gpu withtout IF elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x, 
block.x);

    CHECK(hipFree(d_idata));
    CHECK(hipFree(d_odata));

    // release host buffers as well
    free(h_idata);
    free(h_odata);
    free(tmp);

    CHECK(hipDeviceReset());

    bResult = (gpu_sum == cpu_sum);

    if(!bResult) printf("Test failed!\n");

    return EXIT_SUCCESS;
}
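The program above times each kernel with the host-side seconds() helper around a hipDeviceSynchronize(), which folds launch and synchronization overhead into the measurement. A hedged alternative using hipEvent_t timestamps is sketched below; it is not part of the original file and assumes the same CHECK macro, grid/block configuration and device buffers already exist.

    // Hypothetical event-based timing of reduceNeighbored (GPU time only).
    hipEvent_t t0, t1;
    float ms = 0.0f;
    CHECK(hipEventCreate(&t0));
    CHECK(hipEventCreate(&t1));

    CHECK(hipEventRecord(t0, 0));
    hipLaunchKernelGGL(( reduceNeighbored), dim3(grid), dim3(block), 0, 0, d_idata, d_odata, size);
    CHECK(hipEventRecord(t1, 0));
    CHECK(hipEventSynchronize(t1));

    CHECK(hipEventElapsedTime(&ms, t0, t1));
    printf("reduceNeighbored kernel time: %f ms\n", ms);

    CHECK(hipEventDestroy(t0));
    CHECK(hipEventDestroy(t1));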
26c41abbc84e89c6a6f7d90469e91f76fdb290bc.cu
#include "../common/common.h" #include <cuda_runtime.h> #include <stdio.h> int recursiveReduceWithStrideOnCPU(int *data, int const size) { if (size == 1) return data[0]; int const stride = size / 2; for (int i = 0; i < stride; i++) { data[i] += data[i + stride]; } return recursiveReduceWithStrideOnCPU(data, stride); } __global__ void reduceNeighbored (int *g_idata, int *g_odata, unsigned int n) { unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; int *idata = g_idata + blockIdx.x * blockDim.x; if (idx >= n) return; for (int stride = 1; stride < blockDim.x; stride *= 2) { if ((tid % (2 * stride)) == 0) { idata[tid] += idata[tid + stride]; } __syncthreads(); } if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceneighboredwithoutIF (int *g_idata, int *g_odata, unsigned int n) { unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; int *idata = g_idata + blockIdx.x * blockDim.x; if(idx >= n) return; for (int stride = 1; stride < blockDim.x; stride *= 2) { int index = 2 * stride * tid; if (index < blockDim.x) { idata[index] += idata[index + stride]; } __syncthreads(); } if (tid == 0) g_odata[blockIdx.x] = idata[0]; } int main(int argc, char **argv) { int dev = 0; cudaDeviceProp deviceProp; CHECK(cudaGetDeviceProperties(&deviceProp, dev)); printf("%s starting reduction at ", argv[0]); printf("device %d: %s ", dev, deviceProp.name); CHECK(cudaSetDevice(dev)); bool bResult = false; int size = 1 << 24; printf(" with array size %d ", size); // execution configuration int blocksize = 512; if(argc > 1) { blocksize = atoi(argv[1]); } dim3 block (blocksize, 1); dim3 grid ((size + block.x - 1) / block.x, 1); printf("grid %d block %d\n", grid.x, block.x); size_t bytes = size * sizeof(int); int *h_idata = (int *) malloc(bytes); int *h_odata = (int *) malloc(grid.x * sizeof(int)); int *tmp = (int *) malloc(bytes); for (int i = 0; i < size; i++) { h_idata[i] = (int)( rand() & 0xFF ); } memcpy (tmp, h_idata, bytes); double iStart, iElaps; int gpu_sum = 0; int *d_idata = NULL; int *d_odata = NULL; CHECK(cudaMalloc((void **) &d_idata, bytes)); CHECK(cudaMalloc((void **) &d_odata, grid.x * sizeof(int))); iStart = seconds(); int cpu_sum = recursiveReduceWithStrideOnCPU (tmp, size); iElaps = seconds() - iStart; printf("cpu reduce elapsed %f sec cpu_sum: %d\n", iElaps, cpu_sum); // kernel 1: reduceNeighbored CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); CHECK(cudaDeviceSynchronize()); iStart = seconds(); reduceNeighbored<<<grid, block>>>(d_idata, d_odata, size); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i]; printf("gpu with IF elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x, block.x); CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); CHECK(cudaDeviceSynchronize()); iStart = seconds(); reduceneighboredwithoutIF<<<grid, block>>>(d_idata, d_odata, size); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i]; printf("gpu withtout IF elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x, block.x); CHECK(cudaFree(d_idata)); CHECK(cudaFree(d_odata)); CHECK(cudaDeviceReset()); bResult = (gpu_sum == cpu_sum); 
if(!bResult) printf("Test failed!\n");

    // release host buffers as well
    free(h_idata);
    free(h_odata);
    free(tmp);

    return EXIT_SUCCESS;
}
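Both kernels above start the stride at 1 and double it. A common follow-up, sketched below and not part of the original file, starts at blockDim.x/2 and halves the stride, so the active threads stay contiguous and warp divergence only appears once the stride drops below the warp size. It assumes the same launch configuration and a power-of-two block size, like the kernels above.

__global__ void reduceInterleaved (int *g_idata, int *g_odata, unsigned int n)
{
    unsigned int tid = threadIdx.x;
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;

    // convert the global data pointer to the local pointer of this block, as in the kernels above
    int *idata = g_idata + blockIdx.x * blockDim.x;

    if (idx >= n) return;

    // stride starts at half the block and is halved each iteration
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1)
    {
        if (tid < stride)
        {
            idata[tid] += idata[tid + stride];
        }

        __syncthreads();
    }

    // write the per-block result back to global memory
    if (tid == 0) g_odata[blockIdx.x] = idata[0];
}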
e9b19efea76923330b3cceb8a8fca8a7447f86b8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef __cpluscplus extern "C"{ #endif #include "reduce_kernel.h" #include <stdio.h> #define CUDA_1D_KERNEL_LOOP(i,n) \ for (int i=blockIdx.x*blockDim.x + threadIdx.x;i<n; \ i += blockDim.x * gridDim.x) __global__ void SoftmaxForward( const int nthreads, const float * input_data, const int64_t * offsets_data, float * output_data, const int n_feat, const int n_dim, const int n_sample ) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int sample_idx = index/n_dim; int k = index%n_dim; // dim_idx int s = offsets_data[sample_idx]; int e = -1; if (sample_idx==n_sample-1) e = n_feat; else e = offsets_data[sample_idx+1]; // get max float v_max = input_data[s*n_dim+k]; for (int j=s+1;j<e;++j) { if (input_data[j*n_dim+k]>v_max) { v_max = input_data[j*n_dim+k]; } } // subtract max and exp, accumulate sum float sum = 0; for (int j=s;j<e;++j) { output_data[j*n_dim+k] = exp(input_data[j*n_dim+k]-v_max); sum += output_data[j*n_dim+k]; } // divide sum for (int j=s;j<e;++j) { output_data[j*n_dim+k] /= sum; } } } __global__ void ReduceMaxForward( const int nthreads, const float * input, const int64_t * offsets, float * output, int64_t * buffer, const int n_feat, const int n_dim, const int n_sample) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int sample_idx = index/n_dim; int dim_idx = index%n_dim; int s = offsets[sample_idx]; int e = -1; if (sample_idx==n_sample-1) e = n_feat; else e = offsets[sample_idx+1]; output[index] = input[s*n_dim+dim_idx]; buffer[index] = s*n_dim+dim_idx; for (int i=s;i<e;++i) { int input_idx = i*n_dim+dim_idx; if (input[input_idx]>output[index]) { output[index] = input[input_idx]; buffer[index] = input_idx; } } } } __global__ void ReduceMeanForward( const int nthreads, const float * input, const int64_t * offsets, float * output, const int n_feat, const int n_dim, const int n_sample) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int sample_idx = index/n_dim; int dim_idx = index%n_dim; int s = offsets[sample_idx]; int e = -1; if (sample_idx==n_sample-1) e = n_feat; else e = offsets[sample_idx+1]; for (int i=s;i<e;++i) { int input_idx = i*n_dim+dim_idx; output[index] += input[input_idx]; } output[index] = output[index]/(e-s); } } __global__ void ReduceForward( const int nthreads, const float * input, const int64_t * offsets, float * output, const int n_feat, const int n_dim, const int n_sample) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int sample_idx = index/n_dim; int dim_idx = index%n_dim; int s = offsets[sample_idx]; int e = -1; if (sample_idx==n_sample-1) e = n_feat; else e = offsets[sample_idx+1]; for (int i=s;i<e;++i) { int input_idx = i*n_dim+dim_idx; output[index] += input[input_idx]; } } } int SoftmaxForwardLauncher( const float * input_data, const int64_t * offsets_data, float * output_data, int n_feat, int n_dim, int n_sample, hipStream_t stream ) { const int kThreadsPerBlock = 1024; const int nthreads = n_sample * n_dim; hipError_t err; // call cuda func hipLaunchKernelGGL(( SoftmaxForward), dim3((nthreads+kThreadsPerBlock-1)/kThreadsPerBlock), dim3(kThreadsPerBlock), 0, stream, nthreads, input_data, offsets_data, output_data, n_feat, n_dim, n_sample); err = hipGetLastError(); if (hipSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", hipGetErrorString( err ) ); exit( -1 ); } return 1; } int ReduceMaxForwardLauncher( const float * input, const int64_t * offsets, float * output, int64_t * buffer, int n_feat, int n_dim, int n_sample, hipStream_t stream ) { const int kThreadsPerBlock = 
1024; const int nthreads = n_sample * n_dim; hipError_t err; // call cuda func hipLaunchKernelGGL(( ReduceMaxForward), dim3((nthreads+kThreadsPerBlock-1)/kThreadsPerBlock), dim3(kThreadsPerBlock), 0, stream, nthreads, input, offsets, output, buffer, n_feat, n_dim, n_sample); err = hipGetLastError(); if (hipSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", hipGetErrorString( err ) ); exit( -1 ); } return 1; } int ReduceMeanForwardLauncher( const float * input, const int64_t * offsets, float * output, int n_feat, int n_dim, int n_sample, hipStream_t stream ) { const int kThreadsPerBlock = 1024; const int nthreads = n_sample * n_dim; hipError_t err; // call cuda func hipLaunchKernelGGL(( ReduceMeanForward), dim3((nthreads+kThreadsPerBlock-1)/kThreadsPerBlock), dim3(kThreadsPerBlock), 0, stream, nthreads, input, offsets, output, n_feat, n_dim, n_sample); err = hipGetLastError(); if (hipSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", hipGetErrorString( err ) ); exit( -1 ); } return 1; } int ReduceForwardLauncher( const float * input, const int64_t * offsets, float * output, int n_feat, int n_dim, int n_sample, hipStream_t stream ) { const int kThreadsPerBlock = 1024; const int nthreads = n_sample * n_dim; hipError_t err; // call cuda func hipLaunchKernelGGL(( ReduceForward), dim3((nthreads+kThreadsPerBlock-1)/kThreadsPerBlock), dim3(kThreadsPerBlock), 0, stream, nthreads, input, offsets, output, n_feat, n_dim, n_sample); err = hipGetLastError(); if (hipSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", hipGetErrorString( err ) ); exit( -1 ); } return 1; } __global__ void SoftmaxBackward( const int nthreads, const float * output_grad, const int64_t * offsets_data, const float * output_data, float * input_grad, const int n_feat, const int n_dim, const int n_sample ) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int sample_idx = index/n_dim; int k = index%n_dim; // dim_idx int s = offsets_data[sample_idx]; int e = -1; if (sample_idx==n_sample-1) e = n_feat; else e = offsets_data[sample_idx+1]; for (int j=s;j<e;++j) { input_grad[j*n_dim+k] = output_grad[j*n_dim+k]; for (int l=s;l<e;++l) { input_grad[j*n_dim+k] -= output_grad[l*n_dim+k]*output_data[l*n_dim+k]; } input_grad[j*n_dim+k] *= output_data[j*n_dim+k]; } } } __global__ void ReduceMaxBackward( const int nthreads, const float * output_grad, const int64_t * offsets, float * input_grad, const int64_t * buffer, const int n_feat, const int n_dim, const int n_sample ) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int sample_idx = index/n_dim; //int dim_idx = index%n_dim; int s = offsets[sample_idx]; int e = -1; if (sample_idx==n_sample-1) e = n_feat; else e = offsets[sample_idx+1]; for (int i=s;i<e;++i) { int input_idx = buffer[index]; input_grad[input_idx] = output_grad[index]; } } } __global__ void ReduceMeanBackward( const int nthreads, const float * output_grad, const int64_t * offsets, float * input_grad, const int n_feat, const int n_dim, const int n_sample ) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int sample_idx = index/n_dim; int dim_idx = index%n_dim; int s = offsets[sample_idx]; int e = -1; if (sample_idx==n_sample-1) e = n_feat; else e = offsets[sample_idx+1]; for (int i=s;i<e;++i) { int input_idx = i*n_dim+dim_idx; input_grad[input_idx] = output_grad[index]/(e-s); } } } __global__ void ReduceBackward( const int nthreads, const float * output_grad, const int64_t * offsets, float * input_grad, const int n_feat, const int n_dim, const int n_sample ) { CUDA_1D_KERNEL_LOOP(index, nthreads) { 
int sample_idx = index/n_dim; int dim_idx = index%n_dim; int s = offsets[sample_idx]; int e = -1; if (sample_idx==n_sample-1) e = n_feat; else e = offsets[sample_idx+1]; for (int i=s;i<e;++i) { int input_idx = i*n_dim+dim_idx; input_grad[input_idx] = output_grad[index]; } } } int SoftmaxBackwardLauncher( const float * output_grad, const int64_t * offsets_data, const float * output_data, float * input_grad, int n_feat, int n_dim, int n_sample, hipStream_t stream ) { const int kThreadsPerBlock = 1024; const int nthreads = n_sample * n_dim; hipError_t err; // call cuda func hipLaunchKernelGGL(( SoftmaxBackward), dim3((nthreads+kThreadsPerBlock-1)/kThreadsPerBlock), dim3(kThreadsPerBlock), 0, stream, nthreads, output_grad, offsets_data, output_data, input_grad, n_feat, n_dim, n_sample ); err = hipGetLastError(); if (hipSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", hipGetErrorString( err ) ); exit( -1 ); } return 1; } int ReduceMaxBackwardLauncher( const float * output_grad, const int64_t * offsets, float * input_grad, const int64_t * buffer, int n_feat, int n_dim, int n_sample, hipStream_t stream) { const int kThreadsPerBlock = 1024; const int nthreads = n_sample * n_dim; hipError_t err; // call cuda func hipLaunchKernelGGL(( ReduceMaxBackward), dim3((nthreads+kThreadsPerBlock-1)/kThreadsPerBlock), dim3(kThreadsPerBlock), 0, stream, nthreads, output_grad, offsets, input_grad, buffer, n_feat, n_dim, n_sample ); err = hipGetLastError(); if (hipSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", hipGetErrorString( err ) ); exit( -1 ); } return 1; } int ReduceMeanBackwardLauncher( const float * output_grad, const int64_t * offsets, float * input_grad, int n_feat, int n_dim, int n_sample, hipStream_t stream) { const int kThreadsPerBlock = 1024; const int nthreads = n_sample * n_dim; hipError_t err; // call cuda func hipLaunchKernelGGL(( ReduceMeanBackward), dim3((nthreads+kThreadsPerBlock-1)/kThreadsPerBlock), dim3(kThreadsPerBlock), 0, stream, nthreads, output_grad, offsets, input_grad, n_feat, n_dim, n_sample ); err = hipGetLastError(); if (hipSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", hipGetErrorString( err ) ); exit( -1 ); } return 1; } int ReduceBackwardLauncher( const float * output_grad, const int64_t * offsets, float * input_grad, int n_feat, int n_dim, int n_sample, hipStream_t stream) { const int kThreadsPerBlock = 1024; const int nthreads = n_sample * n_dim; hipError_t err; // call cuda func hipLaunchKernelGGL(( ReduceBackward), dim3((nthreads+kThreadsPerBlock-1)/kThreadsPerBlock), dim3(kThreadsPerBlock), 0, stream, nthreads, output_grad, offsets, input_grad, n_feat, n_dim, n_sample ); err = hipGetLastError(); if (hipSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", hipGetErrorString( err ) ); exit( -1 ); } return 1; } __global__ void ReplicateForward( const int nthreads, const float * input, const int64_t * offsets, float * output, const int n_feat, const int n_dim, const int n_sample) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int sample_idx = index/n_dim; int dim_idx = index%n_dim; int s = offsets[sample_idx]; int e = -1; if (sample_idx==n_sample-1) e = n_feat; else e = offsets[sample_idx+1]; for (int i=s;i<e;++i) { int o_idx = i*n_dim+dim_idx; output[o_idx] = input[index]; } } } int ReplicateForwardLauncher( const float * input, const int64_t * offsets, float * output, int n_feat, int n_dim, int n_sample, hipStream_t stream ) { const int kThreadsPerBlock = 1024; const int nthreads = n_sample * n_dim; 
hipError_t err; // call cuda func hipLaunchKernelGGL(( ReplicateForward), dim3((nthreads+kThreadsPerBlock-1)/kThreadsPerBlock), dim3(kThreadsPerBlock), 0, stream, nthreads, input, offsets, output, n_feat, n_dim, n_sample); err = hipGetLastError(); if (hipSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", hipGetErrorString( err ) ); exit( -1 ); } return 1; } __global__ void ReplicateBackward( const int nthreads, const float * output_grad, const int64_t * offsets, float * input_grad, const int n_feat, const int n_dim, const int n_sample ) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int sample_idx = index/n_dim; int dim_idx = index%n_dim; int s = offsets[sample_idx]; int e = -1; if (sample_idx==n_sample-1) e = n_feat; else e = offsets[sample_idx+1]; for (int i=s;i<e;++i) { int o_idx = i*n_dim+dim_idx; input_grad[index] += output_grad[o_idx]; } } } int ReplicateBackwardLauncher( const float * output_grad, const int64_t * offsets, float * input_grad, int n_feat, int n_dim, int n_sample, hipStream_t stream) { const int kThreadsPerBlock = 1024; const int nthreads = n_sample * n_dim; hipError_t err; // call cuda func hipLaunchKernelGGL(( ReplicateBackward), dim3((nthreads+kThreadsPerBlock-1)/kThreadsPerBlock), dim3(kThreadsPerBlock), 0, stream, nthreads, output_grad, offsets, input_grad, n_feat, n_dim, n_sample ); err = hipGetLastError(); if (hipSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", hipGetErrorString( err ) ); exit( -1 ); } return 1; } #ifdef __cpluscplus extern } #endif
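CUDA_1D_KERNEL_LOOP above is a grid-stride loop: each thread starts at its global index and advances by blockDim.x * gridDim.x, so every element in [0, nthreads) is visited exactly once regardless of how many blocks are launched. A minimal, hypothetical kernel reusing the same macro (the scale kernel and its arguments are not part of the original file):

__global__ void ScaleInPlace(const int nthreads, float* data, const float factor)
{
    CUDA_1D_KERNEL_LOOP(index, nthreads)
    {
        data[index] *= factor;   // each thread strides by blockDim.x * gridDim.x
    }
}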
e9b19efea76923330b3cceb8a8fca8a7447f86b8.cu
#ifdef __cpluscplus extern "C"{ #endif #include "reduce_kernel.h" #include <stdio.h> #define CUDA_1D_KERNEL_LOOP(i,n) \ for (int i=blockIdx.x*blockDim.x + threadIdx.x;i<n; \ i += blockDim.x * gridDim.x) __global__ void SoftmaxForward( const int nthreads, const float * input_data, const int64_t * offsets_data, float * output_data, const int n_feat, const int n_dim, const int n_sample ) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int sample_idx = index/n_dim; int k = index%n_dim; // dim_idx int s = offsets_data[sample_idx]; int e = -1; if (sample_idx==n_sample-1) e = n_feat; else e = offsets_data[sample_idx+1]; // get max float v_max = input_data[s*n_dim+k]; for (int j=s+1;j<e;++j) { if (input_data[j*n_dim+k]>v_max) { v_max = input_data[j*n_dim+k]; } } // subtract max and exp, accumulate sum float sum = 0; for (int j=s;j<e;++j) { output_data[j*n_dim+k] = exp(input_data[j*n_dim+k]-v_max); sum += output_data[j*n_dim+k]; } // divide sum for (int j=s;j<e;++j) { output_data[j*n_dim+k] /= sum; } } } __global__ void ReduceMaxForward( const int nthreads, const float * input, const int64_t * offsets, float * output, int64_t * buffer, const int n_feat, const int n_dim, const int n_sample) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int sample_idx = index/n_dim; int dim_idx = index%n_dim; int s = offsets[sample_idx]; int e = -1; if (sample_idx==n_sample-1) e = n_feat; else e = offsets[sample_idx+1]; output[index] = input[s*n_dim+dim_idx]; buffer[index] = s*n_dim+dim_idx; for (int i=s;i<e;++i) { int input_idx = i*n_dim+dim_idx; if (input[input_idx]>output[index]) { output[index] = input[input_idx]; buffer[index] = input_idx; } } } } __global__ void ReduceMeanForward( const int nthreads, const float * input, const int64_t * offsets, float * output, const int n_feat, const int n_dim, const int n_sample) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int sample_idx = index/n_dim; int dim_idx = index%n_dim; int s = offsets[sample_idx]; int e = -1; if (sample_idx==n_sample-1) e = n_feat; else e = offsets[sample_idx+1]; for (int i=s;i<e;++i) { int input_idx = i*n_dim+dim_idx; output[index] += input[input_idx]; } output[index] = output[index]/(e-s); } } __global__ void ReduceForward( const int nthreads, const float * input, const int64_t * offsets, float * output, const int n_feat, const int n_dim, const int n_sample) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int sample_idx = index/n_dim; int dim_idx = index%n_dim; int s = offsets[sample_idx]; int e = -1; if (sample_idx==n_sample-1) e = n_feat; else e = offsets[sample_idx+1]; for (int i=s;i<e;++i) { int input_idx = i*n_dim+dim_idx; output[index] += input[input_idx]; } } } int SoftmaxForwardLauncher( const float * input_data, const int64_t * offsets_data, float * output_data, int n_feat, int n_dim, int n_sample, cudaStream_t stream ) { const int kThreadsPerBlock = 1024; const int nthreads = n_sample * n_dim; cudaError_t err; // call cuda func SoftmaxForward<<<(nthreads+kThreadsPerBlock-1)/kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>( nthreads, input_data, offsets_data, output_data, n_feat, n_dim, n_sample); err = cudaGetLastError(); if (cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } return 1; } int ReduceMaxForwardLauncher( const float * input, const int64_t * offsets, float * output, int64_t * buffer, int n_feat, int n_dim, int n_sample, cudaStream_t stream ) { const int kThreadsPerBlock = 1024; const int nthreads = n_sample * n_dim; cudaError_t err; // call cuda func 
ReduceMaxForward<<<(nthreads+kThreadsPerBlock-1)/kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>( nthreads, input, offsets, output, buffer, n_feat, n_dim, n_sample); err = cudaGetLastError(); if (cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } return 1; } int ReduceMeanForwardLauncher( const float * input, const int64_t * offsets, float * output, int n_feat, int n_dim, int n_sample, cudaStream_t stream ) { const int kThreadsPerBlock = 1024; const int nthreads = n_sample * n_dim; cudaError_t err; // call cuda func ReduceMeanForward<<<(nthreads+kThreadsPerBlock-1)/kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>( nthreads, input, offsets, output, n_feat, n_dim, n_sample); err = cudaGetLastError(); if (cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } return 1; } int ReduceForwardLauncher( const float * input, const int64_t * offsets, float * output, int n_feat, int n_dim, int n_sample, cudaStream_t stream ) { const int kThreadsPerBlock = 1024; const int nthreads = n_sample * n_dim; cudaError_t err; // call cuda func ReduceForward<<<(nthreads+kThreadsPerBlock-1)/kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>( nthreads, input, offsets, output, n_feat, n_dim, n_sample); err = cudaGetLastError(); if (cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } return 1; } __global__ void SoftmaxBackward( const int nthreads, const float * output_grad, const int64_t * offsets_data, const float * output_data, float * input_grad, const int n_feat, const int n_dim, const int n_sample ) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int sample_idx = index/n_dim; int k = index%n_dim; // dim_idx int s = offsets_data[sample_idx]; int e = -1; if (sample_idx==n_sample-1) e = n_feat; else e = offsets_data[sample_idx+1]; for (int j=s;j<e;++j) { input_grad[j*n_dim+k] = output_grad[j*n_dim+k]; for (int l=s;l<e;++l) { input_grad[j*n_dim+k] -= output_grad[l*n_dim+k]*output_data[l*n_dim+k]; } input_grad[j*n_dim+k] *= output_data[j*n_dim+k]; } } } __global__ void ReduceMaxBackward( const int nthreads, const float * output_grad, const int64_t * offsets, float * input_grad, const int64_t * buffer, const int n_feat, const int n_dim, const int n_sample ) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int sample_idx = index/n_dim; //int dim_idx = index%n_dim; int s = offsets[sample_idx]; int e = -1; if (sample_idx==n_sample-1) e = n_feat; else e = offsets[sample_idx+1]; for (int i=s;i<e;++i) { int input_idx = buffer[index]; input_grad[input_idx] = output_grad[index]; } } } __global__ void ReduceMeanBackward( const int nthreads, const float * output_grad, const int64_t * offsets, float * input_grad, const int n_feat, const int n_dim, const int n_sample ) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int sample_idx = index/n_dim; int dim_idx = index%n_dim; int s = offsets[sample_idx]; int e = -1; if (sample_idx==n_sample-1) e = n_feat; else e = offsets[sample_idx+1]; for (int i=s;i<e;++i) { int input_idx = i*n_dim+dim_idx; input_grad[input_idx] = output_grad[index]/(e-s); } } } __global__ void ReduceBackward( const int nthreads, const float * output_grad, const int64_t * offsets, float * input_grad, const int n_feat, const int n_dim, const int n_sample ) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int sample_idx = index/n_dim; int dim_idx = index%n_dim; int s = offsets[sample_idx]; int e = -1; if (sample_idx==n_sample-1) e = n_feat; else e = 
offsets[sample_idx+1]; for (int i=s;i<e;++i) { int input_idx = i*n_dim+dim_idx; input_grad[input_idx] = output_grad[index]; } } } int SoftmaxBackwardLauncher( const float * output_grad, const int64_t * offsets_data, const float * output_data, float * input_grad, int n_feat, int n_dim, int n_sample, cudaStream_t stream ) { const int kThreadsPerBlock = 1024; const int nthreads = n_sample * n_dim; cudaError_t err; // call cuda func SoftmaxBackward<<<(nthreads+kThreadsPerBlock-1)/kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>( nthreads, output_grad, offsets_data, output_data, input_grad, n_feat, n_dim, n_sample ); err = cudaGetLastError(); if (cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } return 1; } int ReduceMaxBackwardLauncher( const float * output_grad, const int64_t * offsets, float * input_grad, const int64_t * buffer, int n_feat, int n_dim, int n_sample, cudaStream_t stream) { const int kThreadsPerBlock = 1024; const int nthreads = n_sample * n_dim; cudaError_t err; // call cuda func ReduceMaxBackward<<<(nthreads+kThreadsPerBlock-1)/kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>( nthreads, output_grad, offsets, input_grad, buffer, n_feat, n_dim, n_sample ); err = cudaGetLastError(); if (cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } return 1; } int ReduceMeanBackwardLauncher( const float * output_grad, const int64_t * offsets, float * input_grad, int n_feat, int n_dim, int n_sample, cudaStream_t stream) { const int kThreadsPerBlock = 1024; const int nthreads = n_sample * n_dim; cudaError_t err; // call cuda func ReduceMeanBackward<<<(nthreads+kThreadsPerBlock-1)/kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>( nthreads, output_grad, offsets, input_grad, n_feat, n_dim, n_sample ); err = cudaGetLastError(); if (cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } return 1; } int ReduceBackwardLauncher( const float * output_grad, const int64_t * offsets, float * input_grad, int n_feat, int n_dim, int n_sample, cudaStream_t stream) { const int kThreadsPerBlock = 1024; const int nthreads = n_sample * n_dim; cudaError_t err; // call cuda func ReduceBackward<<<(nthreads+kThreadsPerBlock-1)/kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>( nthreads, output_grad, offsets, input_grad, n_feat, n_dim, n_sample ); err = cudaGetLastError(); if (cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } return 1; } __global__ void ReplicateForward( const int nthreads, const float * input, const int64_t * offsets, float * output, const int n_feat, const int n_dim, const int n_sample) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int sample_idx = index/n_dim; int dim_idx = index%n_dim; int s = offsets[sample_idx]; int e = -1; if (sample_idx==n_sample-1) e = n_feat; else e = offsets[sample_idx+1]; for (int i=s;i<e;++i) { int o_idx = i*n_dim+dim_idx; output[o_idx] = input[index]; } } } int ReplicateForwardLauncher( const float * input, const int64_t * offsets, float * output, int n_feat, int n_dim, int n_sample, cudaStream_t stream ) { const int kThreadsPerBlock = 1024; const int nthreads = n_sample * n_dim; cudaError_t err; // call cuda func ReplicateForward<<<(nthreads+kThreadsPerBlock-1)/kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>( nthreads, input, offsets, output, n_feat, n_dim, n_sample); err = cudaGetLastError(); if (cudaSuccess != err) { 
fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } return 1; } __global__ void ReplicateBackward( const int nthreads, const float * output_grad, const int64_t * offsets, float * input_grad, const int n_feat, const int n_dim, const int n_sample ) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int sample_idx = index/n_dim; int dim_idx = index%n_dim; int s = offsets[sample_idx]; int e = -1; if (sample_idx==n_sample-1) e = n_feat; else e = offsets[sample_idx+1]; for (int i=s;i<e;++i) { int o_idx = i*n_dim+dim_idx; input_grad[index] += output_grad[o_idx]; } } } int ReplicateBackwardLauncher( const float * output_grad, const int64_t * offsets, float * input_grad, int n_feat, int n_dim, int n_sample, cudaStream_t stream) { const int kThreadsPerBlock = 1024; const int nthreads = n_sample * n_dim; cudaError_t err; // call cuda func ReplicateBackward<<<(nthreads+kThreadsPerBlock-1)/kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>( nthreads, output_grad, offsets, input_grad, n_feat, n_dim, n_sample ); err = cudaGetLastError(); if (cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } return 1; } #ifdef __cpluscplus extern } #endif
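A hypothetical host-side caller for ReduceForwardLauncher above is sketched below; the sizes, segment offsets and buffer names are made up for the example, the launcher's declaration from reduce_kernel.h is assumed to be visible, and error checking is omitted for brevity. Note that ReduceForward only accumulates into output with +=, so the output buffer must be zeroed first.

#include <cuda_runtime.h>
#include <vector>

void exampleReduceForward()
{
    const int n_feat = 6, n_dim = 4, n_sample = 2;            // 6 rows of 4 features, 2 segments
    std::vector<float>   h_in(n_feat * n_dim, 1.0f);
    std::vector<int64_t> h_off = {0, 2};                       // segment 0: rows [0,2), segment 1: rows [2,6)

    float *d_in = nullptr, *d_out = nullptr;
    int64_t *d_off = nullptr;
    cudaMalloc(&d_in,  h_in.size()  * sizeof(float));
    cudaMalloc(&d_out, n_sample * n_dim * sizeof(float));
    cudaMalloc(&d_off, h_off.size() * sizeof(int64_t));

    cudaMemcpy(d_in,  h_in.data(),  h_in.size()  * sizeof(float),   cudaMemcpyHostToDevice);
    cudaMemcpy(d_off, h_off.data(), h_off.size() * sizeof(int64_t), cudaMemcpyHostToDevice);
    cudaMemset(d_out, 0, n_sample * n_dim * sizeof(float));    // ReduceForward accumulates with +=

    ReduceForwardLauncher(d_in, d_off, d_out, n_feat, n_dim, n_sample, 0);  // default stream
    cudaDeviceSynchronize();

    cudaFree(d_in); cudaFree(d_out); cudaFree(d_off);
}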
33e7ead21604fd584e19185927b6b491c028549b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2013 Ankur Handa and Shuda Li * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following * conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ #include<thrust/random.h> #include<thrust/transform.h> #include<thrust/device_vector.h> #include<thrust/iterator/counting_iterator.h> #include<thrust/iterator/zip_iterator.h> #include<thrust/tuple.h> #include <thrust/random/normal_distribution.h> #include<hiprand/hiprand.h> #include<hiprand/hiprand_kernel.h> #include <boost/math/common_factor_rt.hpp> #include <assert.h> #include <opencv2/cudaarithm.hpp> #include <opencv2/core/cuda/common.hpp> #include "add_kinect_noise.cuh" #include "vector_math.hpp" using namespace pcl::device; using namespace cv::cuda; typedef thrust::device_vector<float3>::iterator Float3Iterator; typedef thrust::tuple<Float3Iterator, Float3Iterator> VertexNormalIteratorTuple; typedef thrust::zip_iterator<VertexNormalIteratorTuple> ZipIterator; typedef thrust::tuple<float3, float3> VertexNormalTuple; __host__ __device__ unsigned int hash(unsigned int a) { a = (a+0x7ed55d16) + (a<<12); a = (a^0xc761c23c) ^ (a>>19); a = (a+0x165667b1) + (a<<5); a = (a+0xd3a2646c) ^ (a<<9); a = (a+0xfd7046c5) + (a<<3); a = (a^0xb55a4f09) ^ (a>>16); return a; } struct ccd_camera_noise { const float sigma_s_red; const float sigma_s_green; const float sigma_s_blue; const float sigma_c_red; const float sigma_c_green; const float sigma_c_blue; const float scale; ccd_camera_noise(float _sigma_s_red, float _sigma_s_green, float _sigma_s_blue, float _sigma_c_red, float _sigma_c_green, float _sigma_c_blue, float _scale) : sigma_s_red(_sigma_s_red), sigma_s_green(_sigma_s_green), sigma_s_blue(_sigma_s_blue), sigma_c_red(_sigma_c_red), sigma_c_green(_sigma_c_green), sigma_c_blue(_sigma_c_blue), scale(_scale) {} __host__ __device__ float3 operator()(const float3& val, const unsigned int& thread_id) { float3 noisy_pix; clock_t start_time = clock(); unsigned int seed = hash(thread_id) + start_time; thrust::minstd_rand rng(seed); noisy_pix.x = val.x/scale; noisy_pix.y = val.y/scale; noisy_pix.z = val.z/scale; thrust::random::normal_distribution<float> red_pnoise (0.0f,sqrt(val.x)*sigma_s_red ); thrust::random::normal_distribution<float> green_pnoise(0.0f,sqrt(val.y)*sigma_s_green); thrust::random::normal_distribution<float> blue_pnoise (0.0f,sqrt(val.z)*sigma_s_blue ); thrust::random::normal_distribution<float> red_cnoise (0.0f,sigma_c_red ); thrust::random::normal_distribution<float> green_cnoise (0.0f,sigma_c_green); 
thrust::random::normal_distribution<float> blue_cnoise (0.0f,sigma_c_blue ); noisy_pix.x = noisy_pix.x + red_pnoise(rng) + red_cnoise(rng); noisy_pix.y = noisy_pix.y + green_pnoise(rng) + green_cnoise(rng); noisy_pix.z = noisy_pix.z + blue_pnoise(rng) + blue_cnoise(rng); return noisy_pix; } }; void launch_add_camera_noise(float3* img_array, float3* noisy_image, const float3& sigma_s, const float3& sigma_c, const unsigned int width, const unsigned int height, float scale) { thrust::device_ptr<float3>img_src(img_array); thrust::device_ptr<float3>img_dest(noisy_image); thrust::transform( img_src, img_src + width*height, thrust::make_counting_iterator(0), img_dest, ccd_camera_noise( sigma_s.x, sigma_s.y, sigma_s.z, sigma_c.x, sigma_c.y, sigma_c.z, scale) ); return; } struct add_kinect_noise { float focal_length; float theta_1; float theta_2; float z1; float z2; float z3; add_kinect_noise(float _focal_length, float _theta_1, float _theta_2, float _z1, float _z2, float _z3): focal_length(_focal_length), theta_1(_theta_1), theta_2(_theta_2), z1(_z1), z2(_z2), z3(_z3){} __host__ __device__ float3 operator()(const VertexNormalTuple& vertex_normal_tuple, const unsigned int& thread_id ) { float3 noisy_3D; float3 noisy_lateral = make_float3(0,0,0); float3 noisy_axial = make_float3(0,0,0); /// Get the seed up clock_t start_time = clock(); unsigned int seed = hash(thread_id) + start_time; thrust::minstd_rand rng(seed); const float3 point3D = thrust::get<0>(vertex_normal_tuple); const float3 normal3D = thrust::get<1>(vertex_normal_tuple); float depth = point3D.z; float my_pi = 22.0f/7.0f; /// Subtract the 1 from the dot product; points are represented in homogeneous form with point.w =1 float dot_prod = normal3D.x*point3D.x + normal3D.y*point3D.y + normal3D.z*point3D.z ; /// xyz of point float3 point3D_3 = point3D; float norm_point = sqrtf( point3D_3.x* point3D_3.x + point3D_3.y* point3D_3.y + point3D_3.z*point3D_3.z ); /// negative sign to indicate the position vector of the point starts from the point float theta = fabs(acosf(-dot_prod/norm_point)); float sigma_theta = theta_1 + theta_2*(theta)/(my_pi/2-theta); sigma_theta = sigma_theta*(depth)/focal_length; thrust::random::normal_distribution<float> normal_noise(0,sigma_theta); float noise_level = normal_noise(rng); noisy_lateral.x = point3D.x + noise_level*normal3D.x; noisy_lateral.y = point3D.y + noise_level*normal3D.y; noisy_lateral.z = point3D.z + noise_level*normal3D.z; noisy_3D.x = noisy_lateral.x + noisy_axial.x; noisy_3D.y = noisy_lateral.y + noisy_axial.y; noisy_3D.z = noisy_lateral.z + noisy_axial.z; if ( fabs(my_pi/2 - theta ) <= 8.0/180.0f*my_pi) { noisy_3D.z = 0.0f; } return noisy_3D; } }; void launch_add_kinect_noise(float3* points3D, float3* normals3D, float3* noisy_points, const unsigned int stridef3, const unsigned int height, float focal_length, float theta_1, float theta_2, float z1, float z2, float z3) { thrust::device_ptr<float3>points_src(points3D); thrust::device_ptr<float3>normals_src(normals3D); thrust::device_ptr<float3>points_dest(noisy_points); ZipIterator vertex_normal_tuple(thrust::make_tuple(points_src, normals_src)); try { thrust::transform( vertex_normal_tuple, vertex_normal_tuple+stridef3*height, thrust::make_counting_iterator(0), points_dest,add_kinect_noise(focal_length, theta_1, theta_2, z1, z2, z3) ); } catch(thrust::system_error &e) { // output an error message and exit std::cerr << "Error accessing vector element: " << e.what() << std::endl; exit(-1); } return; } struct colour_from_normals{ 
colour_from_normals(){}; __host__ __device__ float3 operator()(const float3& normal) { float3 colour; colour.x = ( ( normal.x*128.f+128.f ) ); colour.y = ( ( normal.y*128.f+128.f ) ); colour.z = ( ( normal.z*128.f+128.f ) ); return colour; } }; void launch_colour_from_normals(float3* normals, float3* colour, const unsigned int stridef3, const unsigned int height) { thrust::device_ptr<float3> normal_src(normals); thrust::device_ptr<float3> colour_dest(colour); thrust::transform(normal_src,normal_src + stridef3*height, colour_dest, colour_from_normals()); return; } __global__ void cu_colour_from_normals(const cv::cuda::PtrStepSz<float3> normal, cv::cuda::PtrStepSz<uchar3> color) { const int nX = blockDim.x * blockIdx.x + threadIdx.x; const int nY = blockDim.y * blockIdx.y + threadIdx.y; if (nX >= normal.cols || nY >= normal.rows) return; const float3& nl = normal.ptr(nY)[nX]; uchar3& colour = color.ptr(nY)[nX]; float tmp; tmp = nl.x*128.f + 128.f; tmp = tmp>255.f? 255.f :tmp; tmp = tmp<0.f? 0.f :tmp; colour.x = uchar( tmp ); tmp = nl.y*128.f + 128.f; tmp = tmp>255.f? 255.f :tmp; tmp = tmp<0.f? 0.f :tmp; colour.y = uchar( tmp ); tmp = nl.z*128.f + 128.f; tmp = tmp>255.f? 255.f :tmp; tmp = tmp<0.f? 0.f :tmp; colour.z = uchar( tmp ); return; } void launch_colour_from_normals(const GpuMat& normals, GpuMat* colour) { //define grid and block dim3 block(32, 8); dim3 grid(cv::cuda::device::divUp(normals.cols, block.x), cv::cuda::device::divUp(normals.rows, block.y)); //run kernel hipLaunchKernelGGL(( cu_colour_from_normals), dim3(grid),dim3(block), 0, 0, normals,*colour ); cudaSafeCall ( hipGetLastError () ); } struct gaussian_rand{ float sigma; gaussian_rand(float _sigma):sigma(_sigma){}; __host__ __device__ float2 operator()( float2 point, const unsigned int& thread_id ) { float2 noise; clock_t start_time = clock(); unsigned int seed = hash(thread_id) + start_time; thrust::minstd_rand rng(seed); thrust::random::normal_distribution<float> randn(0,1); noise.x = randn(rng)/sigma; noise.y = randn(rng)/sigma; return noise; } }; void gaussian_shifts(float2* tex_coods, const unsigned int stridef2, const unsigned int height, const float _sigma) { thrust::device_ptr<float2>coords_src(tex_coods); thrust::transform(coords_src,coords_src+stridef2*height, thrust::make_counting_iterator(0), coords_src, gaussian_rand(_sigma) ); } __global__ void cuda_keneral_add_shift (const PtrStepSz<float> depth, const PtrStepSz<float2> gaussian_shift, PtrStepSz<float> shifted_depth ) { const int nX = blockDim.x * blockIdx.x + threadIdx.x; const int nY = blockDim.y * blockIdx.y + threadIdx.y; if (nX >= depth.cols || nY >= depth.rows ) return; const float2& sh = gaussian_shift.ptr(nY)[nX]; int2 location = make_int2( round( nX + sh.x ), round( nY + sh.y ) ); int max; max = depth.cols-1; location.x = location.x < 0 ? 0 : location.x; location.x = location.x > max? max : location.x; max = depth.rows-1; location.y = location.y < 0 ? 0 : location.y; location.y = location.y > max?
max : location.y; shifted_depth.ptr(nY)[nX] = depth.ptr(location.y)[location.x]; return; } void add_gaussian_shifts( const GpuMat& depth_float_, const GpuMat& gaussian_shift_, GpuMat* depth_shifted_ ) { using namespace cv::cuda::device; assert( depth_shifted_->cols == depth_float_.cols && depth_shifted_->rows == depth_float_.rows && depth_shifted_->cols == gaussian_shift_.cols && depth_shifted_->rows == gaussian_shift_.rows ); dim3 block( 8, 8, 1); dim3 grid ( divUp (depth_float_.cols, block.x), divUp (depth_float_.rows, block.y ) ); hipLaunchKernelGGL(( cuda_keneral_add_shift), dim3(grid), dim3(block), 0, 0, depth_float_, gaussian_shift_, *depth_shifted_ ); return; } struct gaussian_depth_noise{ float sigma; gaussian_depth_noise(){}; __host__ __device__ float operator()( float& depth, const unsigned int& thread_id ) { float noisy_depth; clock_t start_time = clock(); unsigned int seed = hash(thread_id) + start_time; thrust::minstd_rand rng(seed); thrust::random::normal_distribution<float> randn(0,1); noisy_depth = (35130/round(35130/round(depth*100) + randn(rng)*(1.0/6.0f) + 0.5))/100; return noisy_depth; } }; void add_depth_noise_barronCVPR2013( float* depth_copy, const int stridef1, const int height) { thrust::device_ptr<float>depth_src(depth_copy); thrust::transform(depth_src, depth_src+stridef1*height, thrust::make_counting_iterator(0), depth_src, gaussian_depth_noise()); } __global__ void get_z_coordinate_only(float4* vertex_with_noise, const unsigned int stridef4, float* noisy_depth, const unsigned int stridef1) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; noisy_depth[y*stridef1+x] = vertex_with_noise[y*stridef4+x].z; } void launch_get_z_coordinate_only(float4* vertex_with_noise, const unsigned int stridef4, const unsigned int width, const unsigned int height, float* noisy_depth, const unsigned int stridef1 ) { dim3 block(8, 8, 1); dim3 grid(width / block.x, height / block.y, 1); hipLaunchKernelGGL(( get_z_coordinate_only), dim3(grid), dim3(block), 0, 0, vertex_with_noise, stridef4, noisy_depth, stridef1); } __global__ void convert_depth2png (float* noisy_depth, const unsigned int stridef1, uint16_t* noisy_depth_png, const unsigned int strideu16) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; noisy_depth_png[y*strideu16+x] = (unsigned short)(noisy_depth[y*stridef1+x]*5000); } void launch_convert_depth2png(float* noisy_depth, const unsigned int stridef1, unsigned short* noisy_depth_png, const unsigned int strideu16, const unsigned int width, const unsigned int height) { dim3 block(8, 8, 1); dim3 grid(width / block.x, height / block.y, 1); hipLaunchKernelGGL(( convert_depth2png), dim3(grid), dim3(block), 0, 0, noisy_depth, stridef1, noisy_depth_png, strideu16); } __device__ float Interpolate(float x0, float x1, float alpha) { return x0 * (1 - alpha) + alpha * x1; } __global__ void cu_generateSmoothNoise(float* smoothNoise, const unsigned int stridef1, float* baseNoise, const float samplePeriod, const float sampleFrequency, unsigned int width, unsigned int height) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; //calculate the horizontal sampling indices int sample_i0 = (x / (int)samplePeriod) * (int)samplePeriod; int sample_i1 = (sample_i0 + (int)samplePeriod) % width; //wrap around float horizontal_blend = (x - sample_i0) * sampleFrequency; //calculate the vertical sampling indices int sample_j0 = 
(y / (int)samplePeriod) * (int)samplePeriod; int sample_j1 = (sample_j0 + (int)samplePeriod) % height; //wrap around float vertical_blend = (y - sample_j0) * sampleFrequency; //blend the top two corners float top = Interpolate(baseNoise[sample_i0+stridef1*sample_j0], baseNoise[sample_i1+stridef1*sample_j0], horizontal_blend); //blend the bottom two corners float bottom = Interpolate(baseNoise[sample_i0+stridef1*sample_j1], baseNoise[sample_i1+stridef1*sample_j1], horizontal_blend); smoothNoise[x+y*stridef1] = Interpolate(top, bottom, vertical_blend); } void generate_smooth_noise(GpuMat* smoothNoise, //iu::ImageGpu_32f_C1 GpuMat* baseNoise, //iu::ImageGpu_32f_C1 const float samplePeriod, const float sampleFrequency, const unsigned int width, const unsigned int height) { dim3 blockdim(boost::math::gcd<unsigned>(width, 32), boost::math::gcd<unsigned>(height, 32), 1); dim3 griddim( width / blockdim.x, height / blockdim.y); hipLaunchKernelGGL(( cu_generateSmoothNoise), dim3(griddim),dim3(blockdim), 0, 0, (float*)smoothNoise->data, smoothNoise->step, (float*)baseNoise->data, samplePeriod, sampleFrequency, smoothNoise->cols, smoothNoise->rows); } __global__ void cu_addNoise2Vertex(float4* vertex, float4* normals, float4* vertex_with_noise, const unsigned int stridef4, float* noise, const unsigned int stridef1, const unsigned int width, const unsigned int height) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; if ( x > 0 && x < width && y > 0 && y < height ) { int ind4 = x+y*stridef4; int ind1 = x+y*stridef1; vertex_with_noise[ind4].x = vertex[ind4].x + noise[ind1]* normals[ind4].x; vertex_with_noise[ind4].y = vertex[ind4].y + noise[ind1]* normals[ind4].y; vertex_with_noise[ind4].z = vertex[ind4].z + noise[ind1]* normals[ind4].z; vertex_with_noise[ind4].w = vertex[ind4].w + 0; } } void add_noise2vertex(GpuMat* vertex, //iu::ImageGpu_32f_C4 GpuMat* normals,//iu::ImageGpu_32f_C4 GpuMat* vertex_with_noise,//iu::ImageGpu_32f_C4 GpuMat* perlinNoise)//iu::ImageGpu_32f_C1 { const int2 imageSize = make_int2(vertex->cols, vertex->rows); const int w = imageSize.x; const int h = imageSize.y; dim3 blockdim(boost::math::gcd<unsigned>(w, 32), boost::math::gcd<unsigned>(h, 32), 1); dim3 griddim( w / blockdim.x, h / blockdim.y); hipLaunchKernelGGL(( cu_addNoise2Vertex), dim3(griddim),dim3(blockdim), 0, 0, (float4*)vertex->data, (float4 *)normals->data, (float4 *)vertex_with_noise->data, vertex->step, (float*)perlinNoise->data, perlinNoise->step,//stride(), perlinNoise->cols, perlinNoise->rows); return; } __global__ void cu_verts2depth( float* d_depth, const float3* d_vert, const float2 pp, const float2 fl, size_t stridef1, size_t stridef4) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; const int index4 = (x + y*stridef4); const float3 v = d_vert[index4]; if( v.z > 0)// && v.z < 1000) { float _x_d = ( v.x*fl.x/v.z ) + pp.x; float _y_d = ( v.y*fl.y/v.z ) + pp.y; int x_d = (int)(_x_d + 0.5f); int y_d = (int)(_y_d + 0.5f); int index = (x_d + y_d*stridef1); d_depth[index] = v.z; } } //iu::ImageGpu_32f_C1 depth //iu::ImageGpu_32f_C4 vertex void convertVerts2Depth(const GpuMat* vertex, GpuMat* depth, float2 pp, float2 fl) { const int2 imageSize = make_int2(depth->cols, depth->rows); const size_t stridef1 = depth->cols; const size_t stridef4 = vertex->cols; const int w = imageSize.x; const int h = imageSize.y; dim3 blockdim(boost::math::gcd<unsigned>(w, 32), boost::math::gcd<unsigned>(h, 32), 1); dim3 
griddim( w / blockdim.x, h / blockdim.y); hipLaunchKernelGGL(( cu_verts2depth), dim3(griddim), dim3(blockdim), 0, 0, (float*)depth->data, (float3*)vertex->data, pp, fl, stridef1, stridef4); cudaSafeCall ( hipGetLastError () ); return; } __global__ void cuConvertDepth2Verts( float* depth, float3* vertex, const float2 fl, const float2 pp, const unsigned int width, const unsigned int height ) { const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; vertex[y*width+x] = make_float3( 0.0f,0.0f,0.0f ); if ( x < width && y < height ) { float depthval = depth[y*width+x]; vertex[y*width+x] = make_float3( depthval*((float)x-pp.x)/fl.x, depthval*((float)y-pp.y)/fl.y, depthval ); } return; } void convertDepth2Verts(const GpuMat& depth, GpuMat* vertex, float2 pp, float2 fl) { const int w = depth.cols; const int h = depth.rows; dim3 blockdim(boost::math::gcd<unsigned>(w, 32), boost::math::gcd<unsigned>(h, 32), 1); dim3 griddim( w / blockdim.x, h / blockdim.y); hipLaunchKernelGGL(( cuConvertDepth2Verts), dim3(griddim), dim3(blockdim), 0, 0, (float*)depth.data, (float3*)vertex->data, fl, pp, w, h ); cudaSafeCall ( hipGetLastError () ); return; } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void kernelFastNormalEstimation (const cv::cuda::PtrStepSz<float3> cvgmPts_, cv::cuda::PtrStepSz<float3> cvgmNls_ ) { const int nX = blockDim.x * blockIdx.x + threadIdx.x; const int nY = blockDim.y * blockIdx.y + threadIdx.y; if (nX >= cvgmPts_.cols || nY >= cvgmPts_.rows ) return; float3& fN = cvgmNls_.ptr(nY)[nX]; if (nX == cvgmPts_.cols - 1 || nY >= cvgmPts_.rows - 1 ){ fN.x = fN.y = fN.z = 0.f; return; } const float3& pt = cvgmPts_.ptr(nY)[nX]; const float3& pt1= cvgmPts_.ptr(nY)[nX+1]; //right const float3& pt2= cvgmPts_.ptr(nY+1)[nX]; //down if(isnan(pt.z) ||isnan(pt1.z) ||isnan(pt2.z) ){ fN.x = fN.y = fN.z = 0.f; return; }//if input or its neighour is NaN, float3 v1; v1.x = pt1.x-pt.x; v1.y = pt1.y-pt.y; v1.z = pt1.z-pt.z; float3 v2; v2.x = pt2.x-pt.x; v2.y = pt2.y-pt.y; v2.z = pt2.z-pt.z; //n = v1 x v2 cross product float3 n; n.x = v1.y*v2.z - v1.z*v2.y; n.y = v1.z*v2.x - v1.x*v2.z; n.z = v1.x*v2.y - v1.y*v2.x; //normalization float norm = sqrtf(n.x*n.x + n.y*n.y + n.z*n.z); if( norm < 1.0e-10 ) { fN.x = fN.y = fN.z = 0.f; return; }//set as NaN, n.x /= norm; n.y /= norm; n.z /= norm; if( -n.x*pt.x - n.y*pt.y - n.z*pt.z <0 ){ //this gives (0-pt).dot( n ); fN.x = n.x; fN.y = n.y; fN.z = n.z; }//if facing away from the camera else{ fN.x = -n.x; fN.y = -n.y; fN.z = -n.z; }//else return; } void cudaFastNormalEstimation(const cv::cuda::GpuMat& cvgmPts_, cv::cuda::GpuMat* pcvgmNls_ ) { pcvgmNls_->setTo(0); dim3 block (32, 8); dim3 grid (cv::cuda::device::divUp (cvgmPts_.cols, block.x), cv::cuda::device::divUp (cvgmPts_.rows, block.y)); hipLaunchKernelGGL(( kernelFastNormalEstimation), dim3(grid), dim3(block), 0, 0, cvgmPts_, *pcvgmNls_ ); cudaSafeCall ( hipGetLastError () ); }
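For reference, a minimal host-side restatement (not part of the original sources) of the lateral-noise standard deviation computed inside the add_kinect_noise functor above; theta is the angle between the viewing ray and the surface normal, and the 22/7 approximation of pi is kept to match the functor. Points within 8 degrees of grazing incidence have their depth zeroed in the functor itself.

// sigma_theta(theta, depth) = (theta_1 + theta_2 * theta / (pi/2 - theta)) * depth / focal_length
inline float lateral_sigma(float theta, float depth, float focal_length,
                           float theta_1, float theta_2)
{
    const float my_pi = 22.0f / 7.0f;                               // same constant as the functor
    float sigma = theta_1 + theta_2 * theta / (my_pi / 2.0f - theta);
    return sigma * depth / focal_length;                            // image-plane sigma scaled by depth/f
}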
33e7ead21604fd584e19185927b6b491c028549b.cu
/* Copyright (c) 2013 Ankur Handa and Shuda Li * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following * conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ #include<thrust/random.h> #include<thrust/transform.h> #include<thrust/device_vector.h> #include<thrust/iterator/counting_iterator.h> #include<thrust/iterator/zip_iterator.h> #include<thrust/tuple.h> #include <thrust/random/normal_distribution.h> #include<curand.h> #include<curand_kernel.h> #include <boost/math/common_factor_rt.hpp> #include <assert.h> #include <opencv2/cudaarithm.hpp> #include <opencv2/core/cuda/common.hpp> #include "add_kinect_noise.cuh" #include "vector_math.hpp" using namespace pcl::device; using namespace cv::cuda; typedef thrust::device_vector<float3>::iterator Float3Iterator; typedef thrust::tuple<Float3Iterator, Float3Iterator> VertexNormalIteratorTuple; typedef thrust::zip_iterator<VertexNormalIteratorTuple> ZipIterator; typedef thrust::tuple<float3, float3> VertexNormalTuple; __host__ __device__ unsigned int hash(unsigned int a) { a = (a+0x7ed55d16) + (a<<12); a = (a^0xc761c23c) ^ (a>>19); a = (a+0x165667b1) + (a<<5); a = (a+0xd3a2646c) ^ (a<<9); a = (a+0xfd7046c5) + (a<<3); a = (a^0xb55a4f09) ^ (a>>16); return a; } struct ccd_camera_noise { const float sigma_s_red; const float sigma_s_green; const float sigma_s_blue; const float sigma_c_red; const float sigma_c_green; const float sigma_c_blue; const float scale; ccd_camera_noise(float _sigma_s_red, float _sigma_s_green, float _sigma_s_blue, float _sigma_c_red, float _sigma_c_green, float _sigma_c_blue, float _scale) : sigma_s_red(_sigma_s_red), sigma_s_green(_sigma_s_green), sigma_s_blue(_sigma_s_blue), sigma_c_red(_sigma_c_red), sigma_c_green(_sigma_c_green), sigma_c_blue(_sigma_c_blue), scale(_scale) {} __host__ __device__ float3 operator()(const float3& val, const unsigned int& thread_id) { float3 noisy_pix; clock_t start_time = clock(); unsigned int seed = hash(thread_id) + start_time; thrust::minstd_rand rng(seed); noisy_pix.x = val.x/scale; noisy_pix.y = val.y/scale; noisy_pix.z = val.z/scale; thrust::random::normal_distribution<float> red_pnoise (0.0f,sqrt(val.x)*sigma_s_red ); thrust::random::normal_distribution<float> green_pnoise(0.0f,sqrt(val.y)*sigma_s_green); thrust::random::normal_distribution<float> blue_pnoise (0.0f,sqrt(val.z)*sigma_s_blue ); thrust::random::normal_distribution<float> red_cnoise (0.0f,sigma_c_red ); thrust::random::normal_distribution<float> green_cnoise (0.0f,sigma_c_green); thrust::random::normal_distribution<float> blue_cnoise (0.0f,sigma_c_blue ); noisy_pix.x = noisy_pix.x + 
red_pnoise(rng) + red_cnoise(rng); noisy_pix.y = noisy_pix.y + green_pnoise(rng) + green_cnoise(rng); noisy_pix.z = noisy_pix.z + blue_pnoise(rng) + blue_cnoise(rng); return noisy_pix; } }; void launch_add_camera_noise(float3* img_array, float3* noisy_image, const float3& sigma_s, const float3& sigma_c, const unsigned int width, const unsigned int height, float scale) { thrust::device_ptr<float3>img_src(img_array); thrust::device_ptr<float3>img_dest(noisy_image); thrust::transform( img_src, img_src + width*height, thrust::make_counting_iterator(0), img_dest, ccd_camera_noise( sigma_s.x, sigma_s.y, sigma_s.z, sigma_c.x, sigma_c.y, sigma_c.z, scale) ); return; } struct add_kinect_noise { float focal_length; float theta_1; float theta_2; float z1; float z2; float z3; add_kinect_noise(float _focal_length, float _theta_1, float _theta_2, float _z1, float _z2, float _z3): focal_length(_focal_length), theta_1(_theta_1), theta_2(_theta_2), z1(_z1), z2(_z2), z3(_z3){} __host__ __device__ float3 operator()(const VertexNormalTuple& vertex_normal_tuple, const unsigned int& thread_id ) { float3 noisy_3D; float3 noisy_lateral = make_float3(0,0,0); float3 noisy_axial = make_float3(0,0,0); /// Get the seed up clock_t start_time = clock(); unsigned int seed = hash(thread_id) + start_time; thrust::minstd_rand rng(seed); const float3 point3D = thrust::get<0>(vertex_normal_tuple); const float3 normal3D = thrust::get<1>(vertex_normal_tuple); float depth = point3D.z; float my_pi = 22.0f/7.0f; /// Subtract the 1 from the dot product; points are represented in homogeneous form with point.w =1 float dot_prod = normal3D.x*point3D.x + normal3D.y*point3D.y + normal3D.z*point3D.z ; /// xyz of point float3 point3D_3 = point3D; float norm_point = sqrtf( point3D_3.x* point3D_3.x + point3D_3.y* point3D_3.y + point3D_3.z*point3D_3.z ); /// negative sign to indicate the position vector of the point starts from the point float theta = fabs(acosf(-dot_prod/norm_point)); float sigma_theta = theta_1 + theta_2*(theta)/(my_pi/2-theta); sigma_theta = sigma_theta*(depth)/focal_length; thrust::random::normal_distribution<float> normal_noise(0,sigma_theta); float noise_level = normal_noise(rng); noisy_lateral.x = point3D.x + noise_level*normal3D.x; noisy_lateral.y = point3D.y + noise_level*normal3D.y; noisy_lateral.z = point3D.z + noise_level*normal3D.z; noisy_3D.x = noisy_lateral.x + noisy_axial.x; noisy_3D.y = noisy_lateral.y + noisy_axial.y; noisy_3D.z = noisy_lateral.z + noisy_axial.z; if ( fabs(my_pi/2 - theta ) <= 8.0/180.0f*my_pi) { noisy_3D.z = 0.0f; } return noisy_3D; } }; void launch_add_kinect_noise(float3* points3D, float3* normals3D, float3* noisy_points, const unsigned int stridef3, const unsigned int height, float focal_length, float theta_1, float theta_2, float z1, float z2, float z3) { thrust::device_ptr<float3>points_src(points3D); thrust::device_ptr<float3>normals_src(normals3D); thrust::device_ptr<float3>points_dest(noisy_points); ZipIterator vertex_normal_tuple(thrust::make_tuple(points_src, normals_src)); try { thrust::transform( vertex_normal_tuple, vertex_normal_tuple+stridef3*height, thrust::make_counting_iterator(0), points_dest,add_kinect_noise(focal_length, theta_1, theta_2, z1, z2, z3) ); } catch(thrust::system_error &e) { // output an error message and exit std::cerr << "Error accessing vector element: " << e.what() << std::endl; exit(-1); } return; } struct colour_from_normals{ colour_from_normals(){}; __host__ __device__ float3 operator()(const float3& normal) { float3 colour; colour.x = ( ( 
normal.x*128.f+128.f ) ); colour.y = ( ( normal.y*128.f+128.f ) ); colour.z = ( ( normal.z*128.f+128.f ) ); return colour; } }; void launch_colour_from_normals(float3* normals, float3* colour, const unsigned int stridef3, const unsigned int height) { thrust::device_ptr<float3> normal_src(normals); thrust::device_ptr<float3> colour_dest(colour); thrust::transform(normal_src,normal_src + stridef3*height, colour_dest, colour_from_normals()); return; } __global__ void cu_colour_from_normals(const cv::cuda::PtrStepSz<float3> normal, cv::cuda::PtrStepSz<uchar3> color) { const int nX = blockDim.x * blockIdx.x + threadIdx.x; const int nY = blockDim.y * blockIdx.y + threadIdx.y; if (nX >= normal.cols || nY >= normal.rows) return; const float3& nl = normal.ptr(nY)[nX]; uchar3& colour = color.ptr(nY)[nX]; float tmp; tmp = nl.x*128.f + 128.f; tmp = tmp>255.f? 255.f :tmp; tmp = tmp<0.f? 0.f :tmp; colour.x = uchar( tmp ); tmp = nl.y*128.f + 128.f; tmp = tmp>255.f? 255.f :tmp; tmp = tmp<0.f? 0.f :tmp; colour.y = uchar( tmp ); tmp = nl.z*128.f + 128.f; tmp = tmp>255.f? 255.f :tmp; tmp = tmp<0.f? 0.f :tmp; colour.z = uchar( tmp ); return; } void launch_colour_from_normals(const GpuMat& normals, GpuMat* colour) { //define grid and block dim3 block(32, 8); dim3 grid(cv::cuda::device::divUp(normals.cols, block.x), cv::cuda::device::divUp(normals.rows, block.y)); //run kernel cu_colour_from_normals<<<grid,block>>>( normals,*colour ); cudaSafeCall ( cudaGetLastError () ); } struct gaussian_rand{ float sigma; gaussian_rand(float _sigma):sigma(_sigma){}; __host__ __device__ float2 operator()( float2 point, const unsigned int& thread_id ) { float2 noise; clock_t start_time = clock(); unsigned int seed = hash(thread_id) + start_time; thrust::minstd_rand rng(seed); thrust::random::normal_distribution<float> randn(0,1); noise.x = randn(rng)/sigma; noise.y = randn(rng)/sigma; return noise; } }; void gaussian_shifts(float2* tex_coods, const unsigned int stridef2, const unsigned int height, const float _sigma) { thrust::device_ptr<float2>coords_src(tex_coods); thrust::transform(coords_src,coords_src+stridef2*height, thrust::make_counting_iterator(0), coords_src, gaussian_rand(_sigma) ); } __global__ void cuda_keneral_add_shift (const PtrStepSz<float> depth, const PtrStepSz<float2> gaussian_shift, PtrStepSz<float> shifted_depth ) { const int nX = blockDim.x * blockIdx.x + threadIdx.x; const int nY = blockDim.y * blockIdx.y + threadIdx.y; if (nX >= depth.cols || nY >= depth.rows ) return; const float2& sh = gaussian_shift.ptr(nY)[nX]; int2 location = make_int2( round( nX + sh.x ), round( nY + sh.y ) ); int max; max = depth.cols-1; location.x = location.x < 0 ? 0 : location.x; location.x = location.x > max? max : location.x; max = depth.rows-1; location.y = location.y < 0 ? 0 : location.y; location.y = location.y > max?
max : location.y; shifted_depth.ptr(nY)[nX] = depth.ptr(location.y)[location.x]; return; } void add_gaussian_shifts( const GpuMat& depth_float_, const GpuMat& gaussian_shift_, GpuMat* depth_shifted_ ) { using namespace cv::cuda::device; assert( depth_shifted_->cols == depth_float_.cols && depth_shifted_->rows == depth_float_.rows && depth_shifted_->cols == gaussian_shift_.cols && depth_shifted_->rows == gaussian_shift_.rows ); dim3 block( 8, 8, 1); dim3 grid ( divUp (depth_float_.cols, block.x), divUp (depth_float_.rows, block.y ) ); cuda_keneral_add_shift<<<grid, block>>>( depth_float_, gaussian_shift_, *depth_shifted_ ); return; } struct gaussian_depth_noise{ float sigma; gaussian_depth_noise(){}; __host__ __device__ float operator()( float& depth, const unsigned int& thread_id ) { float noisy_depth; clock_t start_time = clock(); unsigned int seed = hash(thread_id) + start_time; thrust::minstd_rand rng(seed); thrust::random::normal_distribution<float> randn(0,1); noisy_depth = (35130/round(35130/round(depth*100) + randn(rng)*(1.0/6.0f) + 0.5))/100; return noisy_depth; } }; void add_depth_noise_barronCVPR2013( float* depth_copy, const int stridef1, const int height) { thrust::device_ptr<float>depth_src(depth_copy); thrust::transform(depth_src, depth_src+stridef1*height, thrust::make_counting_iterator(0), depth_src, gaussian_depth_noise()); } __global__ void get_z_coordinate_only(float4* vertex_with_noise, const unsigned int stridef4, float* noisy_depth, const unsigned int stridef1) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; noisy_depth[y*stridef1+x] = vertex_with_noise[y*stridef4+x].z; } void launch_get_z_coordinate_only(float4* vertex_with_noise, const unsigned int stridef4, const unsigned int width, const unsigned int height, float* noisy_depth, const unsigned int stridef1 ) { dim3 block(8, 8, 1); dim3 grid(width / block.x, height / block.y, 1); get_z_coordinate_only<<<grid, block>>>(vertex_with_noise, stridef4, noisy_depth, stridef1); } __global__ void convert_depth2png (float* noisy_depth, const unsigned int stridef1, uint16_t* noisy_depth_png, const unsigned int strideu16) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; noisy_depth_png[y*strideu16+x] = (unsigned short)(noisy_depth[y*stridef1+x]*5000); } void launch_convert_depth2png(float* noisy_depth, const unsigned int stridef1, unsigned short* noisy_depth_png, const unsigned int strideu16, const unsigned int width, const unsigned int height) { dim3 block(8, 8, 1); dim3 grid(width / block.x, height / block.y, 1); convert_depth2png<<<grid, block>>>(noisy_depth, stridef1, noisy_depth_png, strideu16); } __device__ float Interpolate(float x0, float x1, float alpha) { return x0 * (1 - alpha) + alpha * x1; } __global__ void cu_generateSmoothNoise(float* smoothNoise, const unsigned int stridef1, float* baseNoise, const float samplePeriod, const float sampleFrequency, unsigned int width, unsigned int height) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; //calculate the horizontal sampling indices int sample_i0 = (x / (int)samplePeriod) * (int)samplePeriod; int sample_i1 = (sample_i0 + (int)samplePeriod) % width; //wrap around float horizontal_blend = (x - sample_i0) * sampleFrequency; //calculate the vertical sampling indices int sample_j0 = (y / (int)samplePeriod) * (int)samplePeriod; int sample_j1 = (sample_j0 + (int)samplePeriod) % height; //wrap 
around float vertical_blend = (y - sample_j0) * sampleFrequency; //blend the top two corners float top = Interpolate(baseNoise[sample_i0+stridef1*sample_j0], baseNoise[sample_i1+stridef1*sample_j0], horizontal_blend); //blend the bottom two corners float bottom = Interpolate(baseNoise[sample_i0+stridef1*sample_j1], baseNoise[sample_i1+stridef1*sample_j1], horizontal_blend); smoothNoise[x+y*stridef1] = Interpolate(top, bottom, vertical_blend); } void generate_smooth_noise(GpuMat* smoothNoise, //iu::ImageGpu_32f_C1 GpuMat* baseNoise, //iu::ImageGpu_32f_C1 const float samplePeriod, const float sampleFrequency, const unsigned int width, const unsigned int height) { dim3 blockdim(boost::math::gcd<unsigned>(width, 32), boost::math::gcd<unsigned>(height, 32), 1); dim3 griddim( width / blockdim.x, height / blockdim.y); cu_generateSmoothNoise<<<griddim,blockdim>>>((float*)smoothNoise->data, smoothNoise->step, (float*)baseNoise->data, samplePeriod, sampleFrequency, smoothNoise->cols, smoothNoise->rows); } __global__ void cu_addNoise2Vertex(float4* vertex, float4* normals, float4* vertex_with_noise, const unsigned int stridef4, float* noise, const unsigned int stridef1, const unsigned int width, const unsigned int height) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; if ( x > 0 && x < width && y > 0 && y < height ) { int ind4 = x+y*stridef4; int ind1 = x+y*stridef1; vertex_with_noise[ind4].x = vertex[ind4].x + noise[ind1]* normals[ind4].x; vertex_with_noise[ind4].y = vertex[ind4].y + noise[ind1]* normals[ind4].y; vertex_with_noise[ind4].z = vertex[ind4].z + noise[ind1]* normals[ind4].z; vertex_with_noise[ind4].w = vertex[ind4].w + 0; } } void add_noise2vertex(GpuMat* vertex, //iu::ImageGpu_32f_C4 GpuMat* normals,//iu::ImageGpu_32f_C4 GpuMat* vertex_with_noise,//iu::ImageGpu_32f_C4 GpuMat* perlinNoise)//iu::ImageGpu_32f_C1 { const int2 imageSize = make_int2(vertex->cols, vertex->rows); const int w = imageSize.x; const int h = imageSize.y; dim3 blockdim(boost::math::gcd<unsigned>(w, 32), boost::math::gcd<unsigned>(h, 32), 1); dim3 griddim( w / blockdim.x, h / blockdim.y); cu_addNoise2Vertex<<<griddim,blockdim>>>((float4*)vertex->data, (float4 *)normals->data, (float4 *)vertex_with_noise->data, vertex->step, (float*)perlinNoise->data, perlinNoise->step,//stride(), perlinNoise->cols, perlinNoise->rows); return; } __global__ void cu_verts2depth( float* d_depth, const float3* d_vert, const float2 pp, const float2 fl, size_t stridef1, size_t stridef4) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; const int index4 = (x + y*stridef4); const float3 v = d_vert[index4]; if( v.z > 0)// && v.z < 1000) { float _x_d = ( v.x*fl.x/v.z ) + pp.x; float _y_d = ( v.y*fl.y/v.z ) + pp.y; int x_d = (int)(_x_d + 0.5f); int y_d = (int)(_y_d + 0.5f); int index = (x_d + y_d*stridef1); d_depth[index] = v.z; } } //iu::ImageGpu_32f_C1 depth //iu::ImageGpu_32f_C4 vertex void convertVerts2Depth(const GpuMat* vertex, GpuMat* depth, float2 pp, float2 fl) { const int2 imageSize = make_int2(depth->cols, depth->rows); const size_t stridef1 = depth->cols; const size_t stridef4 = vertex->cols; const int w = imageSize.x; const int h = imageSize.y; dim3 blockdim(boost::math::gcd<unsigned>(w, 32), boost::math::gcd<unsigned>(h, 32), 1); dim3 griddim( w / blockdim.x, h / blockdim.y); cu_verts2depth<<<griddim, blockdim>>>((float*)depth->data, (float3*)vertex->data, pp, fl, stridef1, stridef4); cudaSafeCall ( cudaGetLastError 
() ); return; } __global__ void cuConvertDepth2Verts( float* depth, float3* vertex, const float2 fl, const float2 pp, const unsigned int width, const unsigned int height ) { const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; vertex[y*width+x] = make_float3( 0.0f,0.0f,0.0f ); if ( x < width && y < height ) { float depthval = depth[y*width+x]; vertex[y*width+x] = make_float3( depthval*((float)x-pp.x)/fl.x, depthval*((float)y-pp.y)/fl.y, depthval ); } return; } void convertDepth2Verts(const GpuMat& depth, GpuMat* vertex, float2 pp, float2 fl) { const int w = depth.cols; const int h = depth.rows; dim3 blockdim(boost::math::gcd<unsigned>(w, 32), boost::math::gcd<unsigned>(h, 32), 1); dim3 griddim( w / blockdim.x, h / blockdim.y); cuConvertDepth2Verts<<<griddim, blockdim>>> ( (float*)depth.data, (float3*)vertex->data, fl, pp, w, h ); cudaSafeCall ( cudaGetLastError () ); return; } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void kernelFastNormalEstimation (const cv::cuda::PtrStepSz<float3> cvgmPts_, cv::cuda::PtrStepSz<float3> cvgmNls_ ) { const int nX = blockDim.x * blockIdx.x + threadIdx.x; const int nY = blockDim.y * blockIdx.y + threadIdx.y; if (nX >= cvgmPts_.cols || nY >= cvgmPts_.rows ) return; float3& fN = cvgmNls_.ptr(nY)[nX]; if (nX == cvgmPts_.cols - 1 || nY >= cvgmPts_.rows - 1 ){ fN.x = fN.y = fN.z = 0.f; return; } const float3& pt = cvgmPts_.ptr(nY)[nX]; const float3& pt1= cvgmPts_.ptr(nY)[nX+1]; //right const float3& pt2= cvgmPts_.ptr(nY+1)[nX]; //down if(isnan(pt.z) ||isnan(pt1.z) ||isnan(pt2.z) ){ fN.x = fN.y = fN.z = 0.f; return; }//if input or its neighour is NaN, float3 v1; v1.x = pt1.x-pt.x; v1.y = pt1.y-pt.y; v1.z = pt1.z-pt.z; float3 v2; v2.x = pt2.x-pt.x; v2.y = pt2.y-pt.y; v2.z = pt2.z-pt.z; //n = v1 x v2 cross product float3 n; n.x = v1.y*v2.z - v1.z*v2.y; n.y = v1.z*v2.x - v1.x*v2.z; n.z = v1.x*v2.y - v1.y*v2.x; //normalization float norm = sqrtf(n.x*n.x + n.y*n.y + n.z*n.z); if( norm < 1.0e-10 ) { fN.x = fN.y = fN.z = 0.f; return; }//set as NaN, n.x /= norm; n.y /= norm; n.z /= norm; if( -n.x*pt.x - n.y*pt.y - n.z*pt.z <0 ){ //this gives (0-pt).dot( n ); fN.x = n.x; fN.y = n.y; fN.z = n.z; }//if facing away from the camera else{ fN.x = -n.x; fN.y = -n.y; fN.z = -n.z; }//else return; } void cudaFastNormalEstimation(const cv::cuda::GpuMat& cvgmPts_, cv::cuda::GpuMat* pcvgmNls_ ) { pcvgmNls_->setTo(0); dim3 block (32, 8); dim3 grid (cv::cuda::device::divUp (cvgmPts_.cols, block.x), cv::cuda::device::divUp (cvgmPts_.rows, block.y)); kernelFastNormalEstimation<<<grid, block>>>(cvgmPts_, *pcvgmNls_ ); cudaSafeCall ( cudaGetLastError () ); }
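Comparing the .hip and .cu versions of this file, the changes hipify introduces are mechanical: the added hip_runtime/hiprand includes, cuda*-to-hip* runtime calls (e.g. hipGetLastError), and the kernel-launch syntax. The launch of cu_colour_from_normals, taken verbatim from the two files above, shows the mapping:

// CUDA triple-chevron launch (from the .cu file):
cu_colour_from_normals<<<grid,block>>>( normals,*colour );

// Hipified launch (from the .hip file): kernel, grid, block, shared-memory bytes, stream, then the kernel arguments.
hipLaunchKernelGGL(( cu_colour_from_normals), dim3(grid),dim3(block), 0, 0, normals,*colour );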
b704bbcd478720ae138fb853e860ed0a5ee22977.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <cuda.h>

#define height 50
#define width 50

// Device code
__global__ void kernel(float* devPtr, int pitch)
{
    for (int r = 0; r < height; ++r) {
        float* row = (float*)((char*)devPtr + r * pitch);
        for (int c = 0; c < width; ++c) {
            float element = row[c];
        }
    }
}

//Host Code
int main()
{
    float* devPtr;
    size_t pitch;
    hipMallocPitch((void**)&devPtr, &pitch, width * sizeof(float), height);
    printf("%d\n", (int)pitch);
    hipLaunchKernelGGL(( kernel), dim3(100), dim3(512), 0, 0, devPtr, pitch);
    return 0;
}
b704bbcd478720ae138fb853e860ed0a5ee22977.cu
#include <stdio.h>
#include <cuda.h>

#define height 50
#define width 50

// Device code
__global__ void kernel(float* devPtr, int pitch)
{
    for (int r = 0; r < height; ++r) {
        float* row = (float*)((char*)devPtr + r * pitch);
        for (int c = 0; c < width; ++c) {
            float element = row[c];
        }
    }
}

//Host Code
int main()
{
    float* devPtr;
    size_t pitch;
    cudaMallocPitch((void**)&devPtr, &pitch, width * sizeof(float), height);
    printf("%d\n", (int)pitch);
    kernel<<<100, 512>>>(devPtr, pitch);
    return 0;
}
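The example above allocates pitched memory and launches the kernel, but never copies data in, checks for errors, synchronizes, or frees the allocation. A minimal host-side sketch (not from the original file) of the same flow with those steps added; kernel, width, and height refer to the definitions in the file above, and only standard CUDA runtime calls are used:

#include <stdio.h>
#include <cuda_runtime.h>

int run(void)
{
    float hostBuf[height * width] = {0};                 // source data on the host (width/height macros from above)
    float* devPtr = NULL;
    size_t pitch = 0;
    cudaMallocPitch((void**)&devPtr, &pitch, width * sizeof(float), height);
    cudaMemcpy2D(devPtr, pitch, hostBuf, width * sizeof(float),
                 width * sizeof(float), height, cudaMemcpyHostToDevice);
    kernel<<<100, 512>>>(devPtr, (int)pitch);
    cudaError_t err = cudaGetLastError();                // catch launch-configuration errors
    if (err != cudaSuccess) printf("launch failed: %s\n", cudaGetErrorString(err));
    cudaDeviceSynchronize();                             // wait for the kernel to finish
    cudaFree(devPtr);
    return 0;
}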
bcb62b7ec082dd90fccd6f1986c1de3b54068276.hip
// !!! This is a file automatically generated by hipify!!! // ### // ### // ### Practical Course: GPU Programming in Computer Vision // ### // ### // ### Technical University Munich, Computer Vision Group // ### Winter Semester 2015/2016, March 15 - April 15 // ### // ### #include <stdio.h> #include <cstring> #include <iostream> #include "helper.h" #include "lmmin.h" #include "shapeRegistration.h" #include "shapeRegistrationGPU.h" // #include "testingGPU.h" #define DIM_C_REF 5 double normFactor[81] = {1.5707963267949, 0.471404520791033, 0.196349540849362, 0.0942809041582067, 0.0490873852123405, 0.026937401188059, 0.0153398078788564, 0.00897913372935302, 0.00536893275759974, 0.471404520791033, 0.125, 0.0471404520791033, 0.0208333333333333, 0.0101015254455221, 0.00520833333333333, 0.00280597929042281, 0.0015625, 0.00089281159240726, 0.196349540849362, 0.0471404520791033, 0.0163624617374468, 0.00673435029701476, 0.00306796157577128, 0.00149652228822551, 0.00076699039394282, 0.000408142442243319, 0.000223705531566656, 0.0942809041582067, 0.0208333333333333, 0.00673435029701476, 0.00260416666666667, 0.00112239171616913, 0.000520833333333333, 0.000255089026402074, 0.000130208333333333, 0.0000686778148005585, 0.0490873852123405, 0.0101015254455221, 0.00306796157577128, 0.00112239171616913, 0.000460194236365692, 0.000204071221121659, 0.0000958737992428525, 0.000047093358720383, 0.0000239684498107131, 0.0269374011880590, 0.00520833333333333, 0.00149652228822551, 0.000520833333333333, 0.000204071221121659, 0.0000868055555555556, 0.0000392444656003192, 0.0000186011904761905, 0.0000091570419734078, 0.0153398078788564, 0.00280597929042281, 0.00076699039394282, 0.000255089026402074, 0.0000958737992428525, 0.0000392444656003192, 0.0000171203212933665, 0.00000784889312006383, 0.00000374507028292392, 0.00897913372935302, 0.0015625, 0.000408142442243319, 0.000130208333333333, 0.0000470933587203830, 0.0000186011904761905, 0.00000784889312006383, 0.00000348772321428571, 0.00000161594858354255, 0.00536893275759974, 0.00089281159240726, 0.000223705531566656, 0.0000686778148005585, 0.0000239684498107131, 0.0000091570419734078, 0.00000374507028292392, 0.00000161594858354255, 0.000000728208110568542}; using namespace std; int main(int argc, char **argv) { // Before the GPU can process your kernels, a so called "CUDA context" must be // initialized // This happens on the very first call to a CUDA function, and takes some time // (around half a second) // We will do it right here, so that the run time measurements are accurate hipDeviceSynchronize(); CUDA_CHECK; // Reading command line parameters: // getParam("param", var, argc, argv) looks whether "-param xyz" is specified, // and if so stores the value "xyz" in "var" // If "-param" is not specified, the value of "var" remains unchanged // // return value: getParam("param", ...) 
returns true if "-param" is specified, // and false otherwise // input template image bool ret; string templateStr = ""; ret = getParam("t", templateStr, argc, argv); if (!ret) cerr << "ERROR: no template image specified" << endl; // input observation image string observationStr = ""; ret = getParam("o", observationStr, argc, argv); if (!ret) cerr << "ERROR: no observation image specified" << endl; // show the usage instructions if (argc <= 3) { cout << "Usage: " << argv[0] << " -t <template> -o <observation>" << endl; return 1; } // maximum number of iterations for the Levenberg-Marquardt (patience) int patience = 125; getParam("lp", patience, argc, argv); if (patience < 1) { cerr << "ERROR: the patience for the Levenberg-Marquardt must be >=1" << endl; return 1; } // Relative error desired in the sum of squares. double ftol = 0.0001; getParam("lftol", ftol, argc, argv); if (ftol <= 0) { cerr << "ERROR: the lftol must be positive" << endl; return 1; } // Relative error between last two approximations. double xtol = 0.0001; getParam("lxtol", xtol, argc, argv); if (xtol <= 0) { cerr << "ERROR: the lxtol must be positive" << endl; return 1; } double gtol = ftol; getParam("lgtol", gtol, argc, argv); if (gtol <= 0) { cerr << "ERROR: the lgtol must be positive" << endl; return 1; } double epsilon = ftol; getParam("le", epsilon, argc, argv); if (epsilon <= 0) { cerr << "ERROR: the le must be positive" << endl; return 1; } double stepb = 100; getParam("lsb", stepb, argc, argv); if (stepb <= 0) { cerr << "ERROR: the lsb must be positive" << endl; return 1; } int scaled = 1; getParam("lsd", scaled, argc, argv); if (scaled != 0 and scaled != 1) { cerr << "ERROR: the lsd must be either 0 or 1" << endl; return 1; } // Verbosity level int verb = 1; getParam("lv", verb, argc, argv); // Load the input image using opencv (load as "grayscale", since we are // working only with binary shapes of single channel) cv::Mat observationIn = cv::imread(observationStr.c_str(), CV_LOAD_IMAGE_GRAYSCALE); // check if (observationIn.data == NULL) { cerr << "ERROR: Could not load observation image " << observationStr << endl; return 1; } cv::Mat templateIn = cv::imread(templateStr.c_str(), CV_LOAD_IMAGE_GRAYSCALE); // check if (templateIn.data == NULL) { cerr << "ERROR: Could not load template image " << templateStr << endl; return 1; } // convert to float representation (opencv loads image values as single bytes // by default) templateIn.convertTo(templateIn, CV_32F); // convert range of each channel to [0,1] (opencv default is [0,255]) templateIn /= 255.f; // get image dimensions int t_w = templateIn.cols; // width int t_h = templateIn.rows; // height cout << "template image: " << t_w << " x " << t_h << endl; observationIn.convertTo(observationIn, CV_32F); // convert range of each channel to [0,1] (opencv default is [0,255]) observationIn /= 255.f; // get image dimensions int o_w = observationIn.cols; // width int o_h = observationIn.rows; // height cout << "observation image: " << o_w << " x " << o_h << endl; // allocate raw input image array float *observationImg = new float[(size_t)o_w * o_h]; float *templateImg = new float[(size_t)t_w * t_h]; // Init raw input image array // opencv images are interleaved: rgb rgb rgb... (actually bgr bgr bgr...) // But for CUDA it's better to work with layered images: rrr... ggg... bbb... 
// So we will convert as necessary, using interleaved "cv::Mat" for // loading/saving/displaying, and layered "float*" for CUDA computations convert_mat_to_layered(templateImg, templateIn); convert_mat_to_layered(observationImg, observationIn); Timer timer; timer.start(); // testALLGPU(templateImg, templateIn, observationImg, observationIn, t_w, // t_h, o_w, o_h, imgOut) ; // show input images showImage("templateIn", templateIn, 100, 100); showImage("observationIn", observationIn, 310, 100); float *resizedTemplate; float *resizedObservation; int rt_w; // resized template width int rt_h; // resized template height int ro_w; // resized observation width int ro_h; // resized observation height Margins observationMargins; Margins templateMargins; cutMargins(templateImg, t_w, t_h, resizedTemplate, rt_w, rt_h, templateMargins); cutMargins(observationImg, o_w, o_h, resizedObservation, ro_w, ro_h, observationMargins); // we also need the center of mass for normailisation float xCentTemplate; float yCentTemplate; // normalized quadCoords of Template centerOfMass(resizedTemplate, rt_w, rt_h, xCentTemplate, yCentTemplate); QuadCoords *qTemplate = new QuadCoords[rt_w * rt_h]; setQuadCoords(qTemplate, rt_w, rt_h); float t_sx = 1, t_sy = 1; // Normalisation factors qCoordsNormalization(rt_w, rt_h, qTemplate, xCentTemplate, yCentTemplate, t_sx, t_sy); PixelCoords *pTemplate = new PixelCoords[rt_w * rt_h]; setPixelCoords(pTemplate, rt_w, rt_h); pCoordsNormalisation(rt_w, rt_h, pTemplate, xCentTemplate, yCentTemplate, t_sx, t_sy); // TPS transformation parameters TPSParams tpsParams; float xCentObservation; float yCentObservation; centerOfMass(resizedObservation, ro_w, ro_h, xCentObservation, yCentObservation); PixelCoords *pResizedObservation = new PixelCoords[ro_w * ro_h]; setPixelCoords(pResizedObservation, ro_w, ro_h); float o_sx = 1, o_sy = 1; // Normalisation factors pCoordsNormalisation(ro_w, ro_h, pResizedObservation, xCentObservation, yCentObservation, o_sx, o_sy); // Pack the parameters for the lmmin() objective function int sizePar = 6 + (2 * DIM_C_REF * DIM_C_REF); double *par = new double[sizePar]; // Pack the affineParam for (int i = 0; i < 6; i++) { par[i] = tpsParams.affineParam[i]; } // Pack the localCoeff for (int i = 0; i < 2 * DIM_C_REF * DIM_C_REF; i++) { par[i + 6] = tpsParams.localCoeff[i]; } // Pack the auxiliary data for the lmmin() objective function // Data format (all floats) [number of elements, name]: // 1, rt_w // 1, rt_h // 1, ro_w // 1, ro_h // rt_w * rt_h, templateImg // ro_w * ro_h, observationImg // 81, normalization // 2 * rt_w * rt_h, pTemplate // 8 * rt_w * rt_h, qTemplate // 2 * rt_w * rt_h, pObservation // 1, t_sx, // 1, t_sy, // 1, o_sx // 1, o_sy int sizeData = (4) + (rt_w * rt_h) + (ro_w * ro_h) + (81) + (2 * rt_w * rt_h) + (8 * rt_w * rt_h) + (2 * ro_w * ro_h) + (4); float *data = new float[sizeData]; // current writing position in the data array int offset = 0; // Pack the sizes of the arrays data[offset] = rt_w; data[offset + 1] = rt_h; data[offset + 2] = ro_w; data[offset + 3] = ro_h; // We wrote 4 elements, move the reading position 4 places offset += 4; // Template image array for (int i = 0; i < rt_w * rt_h; i++) { data[offset + i] = resizedTemplate[i]; } offset += rt_w * rt_h; // Observation image array for (int i = 0; i < ro_w * ro_h; i++) { data[offset + i] = resizedObservation[i]; } offset += ro_w * ro_h; // Normalization factors (N_i for eq.22) for (int i = 0; i < 81; i++) { data[offset + i] = normFactor[i]; } offset += 81; // Pixel coordinates of the 
template // Every element is a struct with two fields: x, y for (int i = 0; i < rt_w * rt_h; i++) { data[offset + 2 * i] = pTemplate[i].x; data[offset + 2 * i + 1] = pTemplate[i].y; } offset += 2 * rt_w * rt_h; // Quad coordinates of the template // Every element has two fields (x,y) that are arrays of four elements // (corners) for (int i = 0; i < rt_w * rt_h; i++) { data[offset + 8 * i] = qTemplate[i].x[0]; data[offset + 8 * i + 1] = qTemplate[i].y[0]; data[offset + 8 * i + 2] = qTemplate[i].x[1]; data[offset + 8 * i + 3] = qTemplate[i].y[1]; data[offset + 8 * i + 4] = qTemplate[i].x[2]; data[offset + 8 * i + 5] = qTemplate[i].y[2]; data[offset + 8 * i + 6] = qTemplate[i].x[3]; data[offset + 8 * i + 7] = qTemplate[i].y[3]; } offset += 8 * rt_w * rt_h; // Pixel coordinates of the observation // Every element is a struct with two fields: x, y for (int i = 0; i < ro_w * ro_h; i++) { data[offset + 2 * i] = pResizedObservation[i].x; data[offset + 2 * i + 1] = pResizedObservation[i].y; } offset += 2 * ro_w * ro_h; // Normalisation factors of the template data[offset] = t_sx; data[offset + 1] = t_sy; offset += 2; // Normalisation factors of the observation data[offset] = o_sx; data[offset + 1] = o_sy; offset += 2; // Configuration parameters for the lmmin() // Number of equations int m_dat = 87; // Parameter collection for tuning the fit procedure. In most cases, the // default &lm_control_double is adequate. If f is only computed with // single-precision accuracy, &lm_control_float should be used. See also // below, NOTES on initializing parameter records. lm_control_struct control = lm_control_float; // Relative error desired in the sum of squares. Recommended setting: somewhat // above machine precision; less if fvec is computed with reduced accuracy. control.ftol = ftol; printf("\nSolver ftol: %f\n", control.ftol); // Relative error between last two approximations. Recommended setting: as // ftol. control.xtol = xtol; printf("Solver xtol: %f\n", control.xtol); // A measure for degeneracy. Recommended setting: as ftol. control.gtol = ftol; printf("Solver gtol: %f\n", control.gtol); // Step used to calculate the Jacobian. Recommended setting: as ftol, but // definitely less than the accuracy of fvec. control.epsilon = epsilon; printf("Solver epsilon: %f\n", control.epsilon); // Initial bound to steps in the outer loop, generally between 0.01 and 100; // recommended value is 100. control.stepbound = stepb; printf("Solver stepbound: %f\n", control.stepbound); // Used to set the maximum number of function evaluations to patience*n_par. control.patience = patience; printf("Solver patience: %d (%d objective function calls)\n", control.patience, control.patience * 56); // Logical switch (0 or 1). If 1, then scale parameters to their initial // value. This is the recommended setting. control.scale_diag = scaled; printf("Solver scale_diag: %d\n", control.scale_diag); // Progress messages will be written to this file. Typically stdout or stderr. // The value NULL will be interpreted as stdout. control.msgfile = NULL; // If nonzero, some progress information from within the LM algorithm is // written to control.stream. 
control.verbosity = verb; printf("Solver verbosity level: %d\n", control.verbosity); // Status object lm_status_struct status; // Call the lmmin() using the wrapper for the objective function printf("\nSolving the system...\n"); //lmmin(sizePar, par, m_dat, data, lmminObjectiveWrapperGPU, &control, &status); lmmin(sizePar, par, m_dat, data, lmminObjectiveWrapperGPU, &control, &status); printf("Solving completed!\n\n"); // Translate the found vector of parameters to the tpsParams // Unpack the affineParam for (int i = 0; i < 6; i++) { tpsParams.affineParam[i] = par[i]; } // Unpack the localCoeff for (int i = 0; i < 2 * DIM_C_REF * DIM_C_REF; i++) { tpsParams.localCoeff[i] = par[i + 6]; } // compensating for the translation caused by image cropping (see Matlab) float o_tx = -(xCentObservation + observationMargins.top) * o_sx; float o_ty = -(yCentObservation + observationMargins.left) * o_sy; // Denormalize the coefficients for the final transformation for (int j = 0; j < 3; j++) { tpsParams.affineParam[j] /= o_sx; tpsParams.affineParam[3 + j] /= o_sy; } tpsParams.affineParam[2] -= o_tx / o_sx; tpsParams.affineParam[5] -= o_ty / o_sy; for (int j = 0; j < DIM_C_REF * DIM_C_REF; j++) { tpsParams.localCoeff[j] /= o_sx; tpsParams.localCoeff[DIM_C_REF * DIM_C_REF + j] /= o_sy; } // Apply the decided transformation on the normalized quad coordinates of the // template qTPS(rt_w, rt_h, qTemplate, tpsParams, DIM_C_REF); // Find the dimensions needed to fit the registered shape int x_min = 0, x_max = 0, y_min = 0, y_max = 0; for (int i = 0; i < rt_w * rt_h; i++) { for (int q = 0; q < 4; q++) { if (qTemplate[i].x[q] < x_min) x_min = qTemplate[i].x[q]; if (qTemplate[i].x[q] > x_max) x_max = qTemplate[i].x[q]; if (qTemplate[i].y[q] < y_min) y_min = qTemplate[i].y[q]; if (qTemplate[i].y[q] > y_max) y_max = qTemplate[i].y[q]; } } // Dimensions of the full registered shape image int reg_w = x_max - x_min + 1; int reg_h = y_max - y_min + 1; float *registered = new float[reg_w * reg_h]; // TODO: The transfer function requires the output to be pre-initialized. // Change the implementation of the transfer() and remove this. for (int i = 0; i < reg_w * reg_h; i++) { registered[i] = 0; } // Transfer (map) the transformed quad coordinates to pixel coordinates. 
// Store the result to the pRegistered PixelCoords *pRegistered = new PixelCoords[reg_w * reg_h]; setPixelCoords(pRegistered, reg_w, reg_h); transfer(registered, pRegistered, reg_w, reg_h, resizedTemplate, qTemplate, rt_w, rt_h); // Crop the result Margins registeredMargins; float *resizedRegistered; int rreg_w, rreg_h; cutMargins(registered, reg_w, reg_h, resizedRegistered, rreg_w, rreg_h, registeredMargins); // stop timer here timer.end(); float t = timer.get(); // elapsed time in seconds cout << "time: " << t * 1000 << " ms" << endl; // Convert and show the transformed output cv::Mat resizedImRegistered(rreg_h, rreg_w, CV_32FC1); convert_layered_to_mat(resizedImRegistered, resizedRegistered); showImage("Registered shape", resizedImRegistered, 520, 100); // wait for key inputs cv::waitKey(0); // save input and result cv::imwrite("image_template.png", templateIn * 255.f); // "imwrite" assumes channel range [0,255]*/ printf("Template image was written in the image_template.png.\n"); cv::imwrite("image_observation.png", observationIn * 255.f); printf("Observation image was written in the image_observation.png.\n"); cv::imwrite("image_registered.png", resizedImRegistered * 255.f); printf("Registered shape image was written in the image_registered.png.\n"); // free allocated arrays delete[] observationImg; delete[] templateImg; delete[] registered; delete[] resizedRegistered; delete[] qTemplate; delete[] pTemplate; delete[] pResizedObservation; delete[] pRegistered; delete[] par; delete[] data; // close all opencv windows cvDestroyAllWindows(); return 0; }
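As a side note on the bookkeeping above: with DIM_C_REF = 5, the parameter vector handed to lmmin() has 6 affine entries plus 2*5*5 = 50 local TPS coefficients, i.e. 56 unknowns, which is the constant used in the "patience * 56" objective-function-call estimate printed by the solver setup. Spelled out (illustrative only, mirroring the definitions in the file):

const int dimC    = 5;                        // DIM_C_REF in the file above
const int nAffine = 6;                        // 2x3 affine part of the TPS transform
const int nLocal  = 2 * dimC * dimC;          // 50 local coefficients (x and y per control node)
const int sizePar = nAffine + nLocal;         // 56 parameters passed to lmmin()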
bcb62b7ec082dd90fccd6f1986c1de3b54068276.cu
// ### // ### // ### Practical Course: GPU Programming in Computer Vision // ### // ### // ### Technical University Munich, Computer Vision Group // ### Winter Semester 2015/2016, March 15 - April 15 // ### // ### #include <stdio.h> #include <cstring> #include <iostream> #include "helper.h" #include "lmmin.h" #include "shapeRegistration.h" #include "shapeRegistrationGPU.h" // #include "testingGPU.h" #define DIM_C_REF 5 double normFactor[81] = {1.5707963267949, 0.471404520791033, 0.196349540849362, 0.0942809041582067, 0.0490873852123405, 0.026937401188059, 0.0153398078788564, 0.00897913372935302, 0.00536893275759974, 0.471404520791033, 0.125, 0.0471404520791033, 0.0208333333333333, 0.0101015254455221, 0.00520833333333333, 0.00280597929042281, 0.0015625, 0.00089281159240726, 0.196349540849362, 0.0471404520791033, 0.0163624617374468, 0.00673435029701476, 0.00306796157577128, 0.00149652228822551, 0.00076699039394282, 0.000408142442243319, 0.000223705531566656, 0.0942809041582067, 0.0208333333333333, 0.00673435029701476, 0.00260416666666667, 0.00112239171616913, 0.000520833333333333, 0.000255089026402074, 0.000130208333333333, 0.0000686778148005585, 0.0490873852123405, 0.0101015254455221, 0.00306796157577128, 0.00112239171616913, 0.000460194236365692, 0.000204071221121659, 0.0000958737992428525, 0.000047093358720383, 0.0000239684498107131, 0.0269374011880590, 0.00520833333333333, 0.00149652228822551, 0.000520833333333333, 0.000204071221121659, 0.0000868055555555556, 0.0000392444656003192, 0.0000186011904761905, 0.0000091570419734078, 0.0153398078788564, 0.00280597929042281, 0.00076699039394282, 0.000255089026402074, 0.0000958737992428525, 0.0000392444656003192, 0.0000171203212933665, 0.00000784889312006383, 0.00000374507028292392, 0.00897913372935302, 0.0015625, 0.000408142442243319, 0.000130208333333333, 0.0000470933587203830, 0.0000186011904761905, 0.00000784889312006383, 0.00000348772321428571, 0.00000161594858354255, 0.00536893275759974, 0.00089281159240726, 0.000223705531566656, 0.0000686778148005585, 0.0000239684498107131, 0.0000091570419734078, 0.00000374507028292392, 0.00000161594858354255, 0.000000728208110568542}; using namespace std; int main(int argc, char **argv) { // Before the GPU can process your kernels, a so called "CUDA context" must be // initialized // This happens on the very first call to a CUDA function, and takes some time // (around half a second) // We will do it right here, so that the run time measurements are accurate cudaDeviceSynchronize(); CUDA_CHECK; // Reading command line parameters: // getParam("param", var, argc, argv) looks whether "-param xyz" is specified, // and if so stores the value "xyz" in "var" // If "-param" is not specified, the value of "var" remains unchanged // // return value: getParam("param", ...) 
returns true if "-param" is specified, // and false otherwise // input template image bool ret; string templateStr = ""; ret = getParam("t", templateStr, argc, argv); if (!ret) cerr << "ERROR: no template image specified" << endl; // input observation image string observationStr = ""; ret = getParam("o", observationStr, argc, argv); if (!ret) cerr << "ERROR: no observation image specified" << endl; // show the usage instructions if (argc <= 3) { cout << "Usage: " << argv[0] << " -t <template> -o <observation>" << endl; return 1; } // maximum number of iterations for the Levenberg-Marquardt (patience) int patience = 125; getParam("lp", patience, argc, argv); if (patience < 1) { cerr << "ERROR: the patience for the Levenberg-Marquardt must be >=1" << endl; return 1; } // Relative error desired in the sum of squares. double ftol = 0.0001; getParam("lftol", ftol, argc, argv); if (ftol <= 0) { cerr << "ERROR: the lftol must be positive" << endl; return 1; } // Relative error between last two approximations. double xtol = 0.0001; getParam("lxtol", xtol, argc, argv); if (xtol <= 0) { cerr << "ERROR: the lxtol must be positive" << endl; return 1; } double gtol = ftol; getParam("lgtol", gtol, argc, argv); if (gtol <= 0) { cerr << "ERROR: the lgtol must be positive" << endl; return 1; } double epsilon = ftol; getParam("le", epsilon, argc, argv); if (epsilon <= 0) { cerr << "ERROR: the le must be positive" << endl; return 1; } double stepb = 100; getParam("lsb", stepb, argc, argv); if (stepb <= 0) { cerr << "ERROR: the lsb must be positive" << endl; return 1; } int scaled = 1; getParam("lsd", scaled, argc, argv); if (scaled != 0 and scaled != 1) { cerr << "ERROR: the lsd must be either 0 or 1" << endl; return 1; } // Verbosity level int verb = 1; getParam("lv", verb, argc, argv); // Load the input image using opencv (load as "grayscale", since we are // working only with binary shapes of single channel) cv::Mat observationIn = cv::imread(observationStr.c_str(), CV_LOAD_IMAGE_GRAYSCALE); // check if (observationIn.data == NULL) { cerr << "ERROR: Could not load observation image " << observationStr << endl; return 1; } cv::Mat templateIn = cv::imread(templateStr.c_str(), CV_LOAD_IMAGE_GRAYSCALE); // check if (templateIn.data == NULL) { cerr << "ERROR: Could not load template image " << templateStr << endl; return 1; } // convert to float representation (opencv loads image values as single bytes // by default) templateIn.convertTo(templateIn, CV_32F); // convert range of each channel to [0,1] (opencv default is [0,255]) templateIn /= 255.f; // get image dimensions int t_w = templateIn.cols; // width int t_h = templateIn.rows; // height cout << "template image: " << t_w << " x " << t_h << endl; observationIn.convertTo(observationIn, CV_32F); // convert range of each channel to [0,1] (opencv default is [0,255]) observationIn /= 255.f; // get image dimensions int o_w = observationIn.cols; // width int o_h = observationIn.rows; // height cout << "observation image: " << o_w << " x " << o_h << endl; // allocate raw input image array float *observationImg = new float[(size_t)o_w * o_h]; float *templateImg = new float[(size_t)t_w * t_h]; // Init raw input image array // opencv images are interleaved: rgb rgb rgb... (actually bgr bgr bgr...) // But for CUDA it's better to work with layered images: rrr... ggg... bbb... 
// So we will convert as necessary, using interleaved "cv::Mat" for // loading/saving/displaying, and layered "float*" for CUDA computations convert_mat_to_layered(templateImg, templateIn); convert_mat_to_layered(observationImg, observationIn); Timer timer; timer.start(); // testALLGPU(templateImg, templateIn, observationImg, observationIn, t_w, // t_h, o_w, o_h, imgOut) ; // show input images showImage("templateIn", templateIn, 100, 100); showImage("observationIn", observationIn, 310, 100); float *resizedTemplate; float *resizedObservation; int rt_w; // resized template width int rt_h; // resized template height int ro_w; // resized observation width int ro_h; // resized observation height Margins observationMargins; Margins templateMargins; cutMargins(templateImg, t_w, t_h, resizedTemplate, rt_w, rt_h, templateMargins); cutMargins(observationImg, o_w, o_h, resizedObservation, ro_w, ro_h, observationMargins); // we also need the center of mass for normailisation float xCentTemplate; float yCentTemplate; // normalized quadCoords of Template centerOfMass(resizedTemplate, rt_w, rt_h, xCentTemplate, yCentTemplate); QuadCoords *qTemplate = new QuadCoords[rt_w * rt_h]; setQuadCoords(qTemplate, rt_w, rt_h); float t_sx = 1, t_sy = 1; // Normalisation factors qCoordsNormalization(rt_w, rt_h, qTemplate, xCentTemplate, yCentTemplate, t_sx, t_sy); PixelCoords *pTemplate = new PixelCoords[rt_w * rt_h]; setPixelCoords(pTemplate, rt_w, rt_h); pCoordsNormalisation(rt_w, rt_h, pTemplate, xCentTemplate, yCentTemplate, t_sx, t_sy); // TPS transformation parameters TPSParams tpsParams; float xCentObservation; float yCentObservation; centerOfMass(resizedObservation, ro_w, ro_h, xCentObservation, yCentObservation); PixelCoords *pResizedObservation = new PixelCoords[ro_w * ro_h]; setPixelCoords(pResizedObservation, ro_w, ro_h); float o_sx = 1, o_sy = 1; // Normalisation factors pCoordsNormalisation(ro_w, ro_h, pResizedObservation, xCentObservation, yCentObservation, o_sx, o_sy); // Pack the parameters for the lmmin() objective function int sizePar = 6 + (2 * DIM_C_REF * DIM_C_REF); double *par = new double[sizePar]; // Pack the affineParam for (int i = 0; i < 6; i++) { par[i] = tpsParams.affineParam[i]; } // Pack the localCoeff for (int i = 0; i < 2 * DIM_C_REF * DIM_C_REF; i++) { par[i + 6] = tpsParams.localCoeff[i]; } // Pack the auxiliary data for the lmmin() objective function // Data format (all floats) [number of elements, name]: // 1, rt_w // 1, rt_h // 1, ro_w // 1, ro_h // rt_w * rt_h, templateImg // ro_w * ro_h, observationImg // 81, normalization // 2 * rt_w * rt_h, pTemplate // 8 * rt_w * rt_h, qTemplate // 2 * rt_w * rt_h, pObservation // 1, t_sx, // 1, t_sy, // 1, o_sx // 1, o_sy int sizeData = (4) + (rt_w * rt_h) + (ro_w * ro_h) + (81) + (2 * rt_w * rt_h) + (8 * rt_w * rt_h) + (2 * ro_w * ro_h) + (4); float *data = new float[sizeData]; // current writing position in the data array int offset = 0; // Pack the sizes of the arrays data[offset] = rt_w; data[offset + 1] = rt_h; data[offset + 2] = ro_w; data[offset + 3] = ro_h; // We wrote 4 elements, move the reading position 4 places offset += 4; // Template image array for (int i = 0; i < rt_w * rt_h; i++) { data[offset + i] = resizedTemplate[i]; } offset += rt_w * rt_h; // Observation image array for (int i = 0; i < ro_w * ro_h; i++) { data[offset + i] = resizedObservation[i]; } offset += ro_w * ro_h; // Normalization factors (N_i for eq.22) for (int i = 0; i < 81; i++) { data[offset + i] = normFactor[i]; } offset += 81; // Pixel coordinates of the 
template // Every element is a struct with two fields: x, y for (int i = 0; i < rt_w * rt_h; i++) { data[offset + 2 * i] = pTemplate[i].x; data[offset + 2 * i + 1] = pTemplate[i].y; } offset += 2 * rt_w * rt_h; // Quad coordinates of the template // Every element has two fields (x,y) that are arrays of four elements // (corners) for (int i = 0; i < rt_w * rt_h; i++) { data[offset + 8 * i] = qTemplate[i].x[0]; data[offset + 8 * i + 1] = qTemplate[i].y[0]; data[offset + 8 * i + 2] = qTemplate[i].x[1]; data[offset + 8 * i + 3] = qTemplate[i].y[1]; data[offset + 8 * i + 4] = qTemplate[i].x[2]; data[offset + 8 * i + 5] = qTemplate[i].y[2]; data[offset + 8 * i + 6] = qTemplate[i].x[3]; data[offset + 8 * i + 7] = qTemplate[i].y[3]; } offset += 8 * rt_w * rt_h; // Pixel coordinates of the observation // Every element is a struct with two fields: x, y for (int i = 0; i < ro_w * ro_h; i++) { data[offset + 2 * i] = pResizedObservation[i].x; data[offset + 2 * i + 1] = pResizedObservation[i].y; } offset += 2 * ro_w * ro_h; // Normalisation factors of the template data[offset] = t_sx; data[offset + 1] = t_sy; offset += 2; // Normalisation factors of the observation data[offset] = o_sx; data[offset + 1] = o_sy; offset += 2; // Configuration parameters for the lmmin() // Number of equations int m_dat = 87; // Parameter collection for tuning the fit procedure. In most cases, the // default &lm_control_double is adequate. If f is only computed with // single-precision accuracy, &lm_control_float should be used. See also // below, NOTES on initializing parameter records. lm_control_struct control = lm_control_float; // Relative error desired in the sum of squares. Recommended setting: somewhat // above machine precision; less if fvec is computed with reduced accuracy. control.ftol = ftol; printf("\nSolver ftol: %f\n", control.ftol); // Relative error between last two approximations. Recommended setting: as // ftol. control.xtol = xtol; printf("Solver xtol: %f\n", control.xtol); // A measure for degeneracy. Recommended setting: as ftol. control.gtol = ftol; printf("Solver gtol: %f\n", control.gtol); // Step used to calculate the Jacobian. Recommended setting: as ftol, but // definitely less than the accuracy of fvec. control.epsilon = epsilon; printf("Solver epsilon: %f\n", control.epsilon); // Initial bound to steps in the outer loop, generally between 0.01 and 100; // recommended value is 100. control.stepbound = stepb; printf("Solver stepbound: %f\n", control.stepbound); // Used to set the maximum number of function evaluations to patience*n_par. control.patience = patience; printf("Solver patience: %d (%d objective function calls)\n", control.patience, control.patience * 56); // Logical switch (0 or 1). If 1, then scale parameters to their initial // value. This is the recommended setting. control.scale_diag = scaled; printf("Solver scale_diag: %d\n", control.scale_diag); // Progress messages will be written to this file. Typically stdout or stderr. // The value NULL will be interpreted as stdout. control.msgfile = NULL; // If nonzero, some progress information from within the LM algorithm is // written to control.stream. 
control.verbosity = verb; printf("Solver verbosity level: %d\n", control.verbosity); // Status object lm_status_struct status; // Call the lmmin() using the wrapper for the objective function printf("\nSolving the system...\n"); //lmmin(sizePar, par, m_dat, data, lmminObjectiveWrapperGPU, &control, &status); lmmin(sizePar, par, m_dat, data, lmminObjectiveWrapperGPU, &control, &status); printf("Solving completed!\n\n"); // Translate the found vector of parameters to the tpsParams // Unpack the affineParam for (int i = 0; i < 6; i++) { tpsParams.affineParam[i] = par[i]; } // Unpack the localCoeff for (int i = 0; i < 2 * DIM_C_REF * DIM_C_REF; i++) { tpsParams.localCoeff[i] = par[i + 6]; } // compensating for the translation caused by image cropping (see Matlab) float o_tx = -(xCentObservation + observationMargins.top) * o_sx; float o_ty = -(yCentObservation + observationMargins.left) * o_sy; // Denormalize the coefficients for the final transformation for (int j = 0; j < 3; j++) { tpsParams.affineParam[j] /= o_sx; tpsParams.affineParam[3 + j] /= o_sy; } tpsParams.affineParam[2] -= o_tx / o_sx; tpsParams.affineParam[5] -= o_ty / o_sy; for (int j = 0; j < DIM_C_REF * DIM_C_REF; j++) { tpsParams.localCoeff[j] /= o_sx; tpsParams.localCoeff[DIM_C_REF * DIM_C_REF + j] /= o_sy; } // Apply the decided transformation on the normalized quad coordinates of the // template qTPS(rt_w, rt_h, qTemplate, tpsParams, DIM_C_REF); // Find the dimensions needed to fit the registered shape int x_min = 0, x_max = 0, y_min = 0, y_max = 0; for (int i = 0; i < rt_w * rt_h; i++) { for (int q = 0; q < 4; q++) { if (qTemplate[i].x[q] < x_min) x_min = qTemplate[i].x[q]; if (qTemplate[i].x[q] > x_max) x_max = qTemplate[i].x[q]; if (qTemplate[i].y[q] < y_min) y_min = qTemplate[i].y[q]; if (qTemplate[i].y[q] > y_max) y_max = qTemplate[i].y[q]; } } // Dimensions of the full registered shape image int reg_w = x_max - x_min + 1; int reg_h = y_max - y_min + 1; float *registered = new float[reg_w * reg_h]; // TODO: The transfer function requires the output to be pre-initialized. // Change the implementation of the transfer() and remove this. for (int i = 0; i < reg_w * reg_h; i++) { registered[i] = 0; } // Transfer (map) the transformed quad coordinates to pixel coordinates. 
// Store the result to the pRegistered PixelCoords *pRegistered = new PixelCoords[reg_w * reg_h]; setPixelCoords(pRegistered, reg_w, reg_h); transfer(registered, pRegistered, reg_w, reg_h, resizedTemplate, qTemplate, rt_w, rt_h); // Crop the result Margins registeredMargins; float *resizedRegistered; int rreg_w, rreg_h; cutMargins(registered, reg_w, reg_h, resizedRegistered, rreg_w, rreg_h, registeredMargins); // stop timer here timer.end(); float t = timer.get(); // elapsed time in seconds cout << "time: " << t * 1000 << " ms" << endl; // Convert and show the transformed output cv::Mat resizedImRegistered(rreg_h, rreg_w, CV_32FC1); convert_layered_to_mat(resizedImRegistered, resizedRegistered); showImage("Registered shape", resizedImRegistered, 520, 100); // wait for key inputs cv::waitKey(0); // save input and result cv::imwrite("image_template.png", templateIn * 255.f); // "imwrite" assumes channel range [0,255]*/ printf("Template image was written in the image_template.png.\n"); cv::imwrite("image_observation.png", observationIn * 255.f); printf("Observation image was written in the image_observation.png.\n"); cv::imwrite("image_registered.png", resizedImRegistered * 255.f); printf("Registered shape image was written in the image_registered.png.\n"); // free allocated arrays delete[] observationImg; delete[] templateImg; delete[] registered; delete[] resizedRegistered; delete[] qTemplate; delete[] pTemplate; delete[] pResizedObservation; delete[] pRegistered; delete[] par; delete[] data; // close all opencv windows cvDestroyAllWindows(); return 0; }
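For orientation, the objective function receives the flat "data" array exactly as it is packed above. The sketch below shows how the offsets line up on the consuming side; the helper name and callback plumbing are hypothetical (only the offsets and sizes are taken from the packing code above), so treat it as an illustration rather than the project's actual lmminObjectiveWrapperGPU.

// Hypothetical helper illustrating the layout of the packed "data" array.
static void unpack_aux_data(const void *vdata) {
  const float *data = static_cast<const float *>(vdata);
  int offset = 0;
  const int rt_w = (int)data[offset + 0];   // resized template width
  const int rt_h = (int)data[offset + 1];   // resized template height
  const int ro_w = (int)data[offset + 2];   // resized observation width
  const int ro_h = (int)data[offset + 3];   // resized observation height
  offset += 4;
  const float *resizedTemplate    = data + offset; offset += rt_w * rt_h;
  const float *resizedObservation = data + offset; offset += ro_w * ro_h;
  const float *normalization      = data + offset; offset += 81;               // N_i of eq. 22
  const float *pTemplate          = data + offset; offset += 2 * rt_w * rt_h;  // (x,y) per pixel
  const float *qTemplate          = data + offset; offset += 8 * rt_w * rt_h;  // 4 corners x (x,y)
  const float *pObservation       = data + offset; offset += 2 * ro_w * ro_h;  // (x,y) per pixel
  const float t_sx = data[offset], t_sy = data[offset + 1]; offset += 2;       // template scale factors
  const float o_sx = data[offset], o_sy = data[offset + 1]; offset += 2;       // observation scale factors
  // ...from here the wrapper would evaluate the m_dat = 87 moment residuals...
  (void)resizedTemplate; (void)resizedObservation; (void)normalization;
  (void)pTemplate; (void)qTemplate; (void)pObservation;
  (void)t_sx; (void)t_sy; (void)o_sx; (void)o_sy;
}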
8253979e5d3a047b3306fa593868096d726d1132.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2019-2020, NVIDIA CORPORATION. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <thrust/complex.h> /////////////////////////////////////////////////////////////////////////////// // UPFIRDN1D // /////////////////////////////////////////////////////////////////////////////// template<typename T> __device__ void _cupy_upfirdn1D( const T *__restrict__ inp, const T *__restrict__ h_trans_flip, const int up, const int down, const int axis, const int x_shape_a, const int h_per_phase, const int padded_len, T *__restrict__ out, const int outW ) { const int t { static_cast<int>( blockIdx.x * blockDim.x + threadIdx.x ) }; const int stride { static_cast<int>( blockDim.x * gridDim.x ) }; for ( size_t tid = t; tid < outW; tid += stride ) { const int x_idx { static_cast<int>( ( tid * down ) / up ) % padded_len }; int h_idx { static_cast<int>( ( tid * down ) % up * h_per_phase ) }; int x_conv_idx { x_idx - h_per_phase + 1 }; if ( x_conv_idx < 0 ) { h_idx -= x_conv_idx; x_conv_idx = 0; } T temp {}; for ( int x_c = x_conv_idx; x_c < ( x_idx + 1 ); x_c++ ) { if ( x_c < x_shape_a && x_c >= 0 ) { temp += inp[x_c] * h_trans_flip[h_idx]; } h_idx += 1; } out[tid] = temp; } } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_upfirdn1D_float32( const float *__restrict__ inp, const float *__restrict__ h_trans_flip, const int up, const int down, const int axis, const int x_shape_a, const int h_per_phase, const int padded_len, float *__restrict__ out, const int outW ) { _cupy_upfirdn1D<float>( inp, h_trans_flip, up, down, axis, x_shape_a, h_per_phase, padded_len, out, outW ); } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_upfirdn1D_float64( const double *__restrict__ inp, const double *__restrict__ h_trans_flip, const int up, const int down, const int axis, const int x_shape_a, const int h_per_phase, const int padded_len, double *__restrict__ out, const int outW ) { _cupy_upfirdn1D<double>( inp, h_trans_flip, up, down, axis, x_shape_a, h_per_phase, padded_len, out, outW ); } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_upfirdn1D_complex64( const thrust::complex<float> *__restrict__ inp, const thrust::complex<float> *__restrict__ h_trans_flip, const int up, const int down, const int axis, const int x_shape_a, const int h_per_phase, const int padded_len, thrust::complex<float> *__restrict__ out, const int outW ) { _cupy_upfirdn1D<thrust::complex<float>>( inp, h_trans_flip, up, down, axis, x_shape_a, h_per_phase, padded_len, out, outW ); } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_upfirdn1D_complex128( const thrust::complex<double> *__restrict__ inp, const thrust::complex<double> *__restrict__ h_trans_flip, const int up, const int down, const int axis, const int x_shape_a, const int h_per_phase, const int padded_len, thrust::complex<double> *__restrict__ out, const int outW ) { _cupy_upfirdn1D<thrust::complex<double>>( inp, h_trans_flip, up, down, axis, x_shape_a, 
h_per_phase, padded_len, out, outW ); } /////////////////////////////////////////////////////////////////////////////// // UPFIRDN2D // /////////////////////////////////////////////////////////////////////////////// template<typename T> __device__ void _cupy_upfirdn2D( const T *__restrict__ inp, const int inpH, const T *__restrict__ h_trans_flip, const int up, const int down, const int axis, const int x_shape_a, const int h_per_phase, const int padded_len, T *__restrict__ out, const int outW, const int outH ) { const int ty { static_cast<int>( blockIdx.x * blockDim.x + threadIdx.x ) }; const int tx { static_cast<int>( blockIdx.y * blockDim.y + threadIdx.y ) }; const int stride_y { static_cast<int>( blockDim.x * gridDim.x ) }; const int stride_x { static_cast<int>( blockDim.y * gridDim.y ) }; for (int x = tx; x < outH; x += stride_x) { for (int y = ty; y < outW; y += stride_y) { int x_idx {}; int h_idx {}; if ( axis == 1 ) { x_idx = ( static_cast<int>( x * down ) / up ) % padded_len; h_idx = ( x * down ) % up * h_per_phase; } else { x_idx = ( static_cast<int>( y * down ) / up ) % padded_len; h_idx = ( y * down ) % up * h_per_phase; } int x_conv_idx { x_idx - h_per_phase + 1 }; if ( x_conv_idx < 0 ) { h_idx -= x_conv_idx; x_conv_idx = 0; } T temp {}; for ( int x_c = x_conv_idx; x_c < ( x_idx + 1 ); x_c++ ) { if ( x_c < x_shape_a && x_c >= 0 ) { if ( axis == 1 ) { temp += inp[y * inpH + x_c] * h_trans_flip[h_idx]; } else { temp += inp[x_c * inpH + x] * h_trans_flip[h_idx]; } } h_idx += 1; } out[y * outH + x] = temp; } } } extern "C" __global__ void __launch_bounds__( 64 ) _cupy_upfirdn2D_float32( const float *__restrict__ inp, const int inpH, const float *__restrict__ h_trans_flip, const int up, const int down, const int axis, const int x_shape_a, const int h_per_phase, const int padded_len, float *__restrict__ out, const int outW, const int outH ) { _cupy_upfirdn2D<float>( inp, inpH, h_trans_flip, up, down, axis, x_shape_a, h_per_phase, padded_len, out, outW, outH ); } extern "C" __global__ void __launch_bounds__( 64 ) _cupy_upfirdn2D_float64( const double *__restrict__ inp, const int inpH, const double *__restrict__ h_trans_flip, const int up, const int down, const int axis, const int x_shape_a, const int h_per_phase, const int padded_len, double *__restrict__ out, const int outW, const int outH ) { _cupy_upfirdn2D<double>( inp, inpH, h_trans_flip, up, down, axis, x_shape_a, h_per_phase, padded_len, out, outW, outH ); } extern "C" __global__ void __launch_bounds__( 64 ) _cupy_upfirdn2D_complex64( const thrust::complex<float> *__restrict__ inp, const int inpH, const thrust::complex<float> *__restrict__ h_trans_flip, const int up, const int down, const int axis, const int x_shape_a, const int h_per_phase, const int padded_len, thrust::complex<float> *__restrict__ out, const int outW, const int outH ) { _cupy_upfirdn2D<thrust::complex<float>>( inp, inpH, h_trans_flip, up, down, axis, x_shape_a, h_per_phase, padded_len, out, outW, outH ); } extern "C" __global__ void __launch_bounds__( 64 ) _cupy_upfirdn2D_complex128( const thrust::complex<double> *__restrict__ inp, const int inpH, const thrust::complex<double> *__restrict__ h_trans_flip, const int up, const int down, const int axis, const int x_shape_a, const int h_per_phase, const int padded_len, thrust::complex<double> *__restrict__ out, const int outW, const int outH ) { _cupy_upfirdn2D<thrust::complex<double>>( inp, inpH, h_trans_flip, up, down, axis, x_shape_a, h_per_phase, padded_len, out, outW, outH ); }
8253979e5d3a047b3306fa593868096d726d1132.cu
// Copyright (c) 2019-2020, NVIDIA CORPORATION. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <thrust/complex.h> /////////////////////////////////////////////////////////////////////////////// // UPFIRDN1D // /////////////////////////////////////////////////////////////////////////////// template<typename T> __device__ void _cupy_upfirdn1D( const T *__restrict__ inp, const T *__restrict__ h_trans_flip, const int up, const int down, const int axis, const int x_shape_a, const int h_per_phase, const int padded_len, T *__restrict__ out, const int outW ) { const int t { static_cast<int>( blockIdx.x * blockDim.x + threadIdx.x ) }; const int stride { static_cast<int>( blockDim.x * gridDim.x ) }; for ( size_t tid = t; tid < outW; tid += stride ) { const int x_idx { static_cast<int>( ( tid * down ) / up ) % padded_len }; int h_idx { static_cast<int>( ( tid * down ) % up * h_per_phase ) }; int x_conv_idx { x_idx - h_per_phase + 1 }; if ( x_conv_idx < 0 ) { h_idx -= x_conv_idx; x_conv_idx = 0; } T temp {}; for ( int x_c = x_conv_idx; x_c < ( x_idx + 1 ); x_c++ ) { if ( x_c < x_shape_a && x_c >= 0 ) { temp += inp[x_c] * h_trans_flip[h_idx]; } h_idx += 1; } out[tid] = temp; } } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_upfirdn1D_float32( const float *__restrict__ inp, const float *__restrict__ h_trans_flip, const int up, const int down, const int axis, const int x_shape_a, const int h_per_phase, const int padded_len, float *__restrict__ out, const int outW ) { _cupy_upfirdn1D<float>( inp, h_trans_flip, up, down, axis, x_shape_a, h_per_phase, padded_len, out, outW ); } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_upfirdn1D_float64( const double *__restrict__ inp, const double *__restrict__ h_trans_flip, const int up, const int down, const int axis, const int x_shape_a, const int h_per_phase, const int padded_len, double *__restrict__ out, const int outW ) { _cupy_upfirdn1D<double>( inp, h_trans_flip, up, down, axis, x_shape_a, h_per_phase, padded_len, out, outW ); } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_upfirdn1D_complex64( const thrust::complex<float> *__restrict__ inp, const thrust::complex<float> *__restrict__ h_trans_flip, const int up, const int down, const int axis, const int x_shape_a, const int h_per_phase, const int padded_len, thrust::complex<float> *__restrict__ out, const int outW ) { _cupy_upfirdn1D<thrust::complex<float>>( inp, h_trans_flip, up, down, axis, x_shape_a, h_per_phase, padded_len, out, outW ); } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_upfirdn1D_complex128( const thrust::complex<double> *__restrict__ inp, const thrust::complex<double> *__restrict__ h_trans_flip, const int up, const int down, const int axis, const int x_shape_a, const int h_per_phase, const int padded_len, thrust::complex<double> *__restrict__ out, const int outW ) { _cupy_upfirdn1D<thrust::complex<double>>( inp, h_trans_flip, up, down, axis, x_shape_a, h_per_phase, padded_len, out, outW ); } 
/////////////////////////////////////////////////////////////////////////////// // UPFIRDN2D // /////////////////////////////////////////////////////////////////////////////// template<typename T> __device__ void _cupy_upfirdn2D( const T *__restrict__ inp, const int inpH, const T *__restrict__ h_trans_flip, const int up, const int down, const int axis, const int x_shape_a, const int h_per_phase, const int padded_len, T *__restrict__ out, const int outW, const int outH ) { const int ty { static_cast<int>( blockIdx.x * blockDim.x + threadIdx.x ) }; const int tx { static_cast<int>( blockIdx.y * blockDim.y + threadIdx.y ) }; const int stride_y { static_cast<int>( blockDim.x * gridDim.x ) }; const int stride_x { static_cast<int>( blockDim.y * gridDim.y ) }; for (int x = tx; x < outH; x += stride_x) { for (int y = ty; y < outW; y += stride_y) { int x_idx {}; int h_idx {}; if ( axis == 1 ) { x_idx = ( static_cast<int>( x * down ) / up ) % padded_len; h_idx = ( x * down ) % up * h_per_phase; } else { x_idx = ( static_cast<int>( y * down ) / up ) % padded_len; h_idx = ( y * down ) % up * h_per_phase; } int x_conv_idx { x_idx - h_per_phase + 1 }; if ( x_conv_idx < 0 ) { h_idx -= x_conv_idx; x_conv_idx = 0; } T temp {}; for ( int x_c = x_conv_idx; x_c < ( x_idx + 1 ); x_c++ ) { if ( x_c < x_shape_a && x_c >= 0 ) { if ( axis == 1 ) { temp += inp[y * inpH + x_c] * h_trans_flip[h_idx]; } else { temp += inp[x_c * inpH + x] * h_trans_flip[h_idx]; } } h_idx += 1; } out[y * outH + x] = temp; } } } extern "C" __global__ void __launch_bounds__( 64 ) _cupy_upfirdn2D_float32( const float *__restrict__ inp, const int inpH, const float *__restrict__ h_trans_flip, const int up, const int down, const int axis, const int x_shape_a, const int h_per_phase, const int padded_len, float *__restrict__ out, const int outW, const int outH ) { _cupy_upfirdn2D<float>( inp, inpH, h_trans_flip, up, down, axis, x_shape_a, h_per_phase, padded_len, out, outW, outH ); } extern "C" __global__ void __launch_bounds__( 64 ) _cupy_upfirdn2D_float64( const double *__restrict__ inp, const int inpH, const double *__restrict__ h_trans_flip, const int up, const int down, const int axis, const int x_shape_a, const int h_per_phase, const int padded_len, double *__restrict__ out, const int outW, const int outH ) { _cupy_upfirdn2D<double>( inp, inpH, h_trans_flip, up, down, axis, x_shape_a, h_per_phase, padded_len, out, outW, outH ); } extern "C" __global__ void __launch_bounds__( 64 ) _cupy_upfirdn2D_complex64( const thrust::complex<float> *__restrict__ inp, const int inpH, const thrust::complex<float> *__restrict__ h_trans_flip, const int up, const int down, const int axis, const int x_shape_a, const int h_per_phase, const int padded_len, thrust::complex<float> *__restrict__ out, const int outW, const int outH ) { _cupy_upfirdn2D<thrust::complex<float>>( inp, inpH, h_trans_flip, up, down, axis, x_shape_a, h_per_phase, padded_len, out, outW, outH ); } extern "C" __global__ void __launch_bounds__( 64 ) _cupy_upfirdn2D_complex128( const thrust::complex<double> *__restrict__ inp, const int inpH, const thrust::complex<double> *__restrict__ h_trans_flip, const int up, const int down, const int axis, const int x_shape_a, const int h_per_phase, const int padded_len, thrust::complex<double> *__restrict__ out, const int outW, const int outH ) { _cupy_upfirdn2D<thrust::complex<double>>( inp, inpH, h_trans_flip, up, down, axis, x_shape_a, h_per_phase, padded_len, out, outW, outH ); }
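These upfirdn kernels appear to be cuSignal's CuPy raw kernels and are normally compiled and launched from Python. As a standalone sanity check, the driver below is a sketch that assumes the kernel file is built together with it using separate compilation (e.g. nvcc -rdc=true driver.cu upfirdn.cu). With up = down = 1 and a single-tap filter, h_per_phase = 1 and padded_len = outW = n, and the 1D kernel reduces to a straight copy, which makes the launch easy to verify.

// Standalone smoke test for _cupy_upfirdn1D_float32: identity configuration.
#include <cstdio>
#include <cuda_runtime.h>

extern "C" __global__ void __launch_bounds__( 512 )
    _cupy_upfirdn1D_float32( const float *__restrict__ inp,
                             const float *__restrict__ h_trans_flip,
                             const int up, const int down, const int axis,
                             const int x_shape_a, const int h_per_phase,
                             const int padded_len, float *__restrict__ out,
                             const int outW );

int main() {
  const int n = 1024;
  float h_in[n], h_out[n];
  for (int i = 0; i < n; i++) h_in[i] = (float)i;
  const float h_filter = 1.0f;                       // single filter tap

  float *d_in, *d_filter, *d_out;
  cudaMalloc(&d_in, n * sizeof(float));
  cudaMalloc(&d_filter, sizeof(float));
  cudaMalloc(&d_out, n * sizeof(float));
  cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(d_filter, &h_filter, sizeof(float), cudaMemcpyHostToDevice);

  // arguments: up, down, axis, x_shape_a, h_per_phase, padded_len, out, outW
  _cupy_upfirdn1D_float32<<<(n + 511) / 512, 512>>>(
      d_in, d_filter, 1, 1, 0, n, 1, n, d_out, n);

  cudaMemcpy(h_out, d_out, n * sizeof(float), cudaMemcpyDeviceToHost);
  printf("out[42] = %.1f (expected 42.0)\n", h_out[42]);

  cudaFree(d_in); cudaFree(d_filter); cudaFree(d_out);
  return 0;
}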
262c5b66e1e79d91e4128d0ef5ce87d6d64b03c3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*************************************************************************** * * (C) Copyright 2010 The Board of Trustees of the * University of Illinois * All Rights Reserved * ***************************************************************************/ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #define BLOCK_SIZE 1024 #define GRID_SIZE 65535 #define NUM_BANKS 16 #define LOG_NUM_BANKS 4 #define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS + (index) >> (2*LOG_NUM_BANKS)) #define EXPANDED_SIZE(__x) (__x+(__x>>LOG_NUM_BANKS)+(__x>>(2*LOG_NUM_BANKS))) //////////////////////////////////////////////////////////////////////////////// // Kernels //////////////////////////////////////////////////////////////////////////////// __global__ void scan_L1_kernel(unsigned int n, unsigned int* data, unsigned int* inter) { __shared__ unsigned int s_data[EXPANDED_SIZE(BLOCK_SIZE)]; unsigned int thid = threadIdx.x; unsigned int g_ai = blockIdx.x*2*blockDim.x + threadIdx.x; unsigned int g_bi = g_ai + blockDim.x; unsigned int s_ai = thid; unsigned int s_bi = thid + blockDim.x; s_ai += CONFLICT_FREE_OFFSET(s_ai); s_bi += CONFLICT_FREE_OFFSET(s_bi); s_data[s_ai] = (g_ai < n) ? data[g_ai] : 0; s_data[s_bi] = (g_bi < n) ? data[g_bi] : 0; unsigned int stride = 1; for (unsigned int d = blockDim.x; d > 0; d >>= 1) { __syncthreads(); if (thid < d) { unsigned int i = 2*stride*thid; unsigned int ai = i + stride - 1; unsigned int bi = ai + stride; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); s_data[bi] += s_data[ai]; } stride *= 2; } if (thid == 0){ unsigned int last = blockDim.x*2 -1; last += CONFLICT_FREE_OFFSET(last); inter[blockIdx.x] = s_data[last]; s_data[last] = 0; } for (unsigned int d = 1; d <= blockDim.x; d *= 2) { stride >>= 1; __syncthreads(); if (thid < d) { unsigned int i = 2*stride*thid; unsigned int ai = i + stride - 1; unsigned int bi = ai + stride; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); unsigned int t = s_data[ai]; s_data[ai] = s_data[bi]; s_data[bi] += t; } } __syncthreads(); if (g_ai < n) { data[g_ai] = s_data[s_ai]; } if (g_bi < n) { data[g_bi] = s_data[s_bi]; } } __global__ void scan_inter1_kernel(unsigned int* data, unsigned int iter) { extern __shared__ unsigned int s_data[]; unsigned int thid = threadIdx.x; unsigned int gthid = (blockIdx.x*blockDim.x + threadIdx.x); unsigned int gi = 2*iter*gthid; unsigned int g_ai = gi + iter - 1; unsigned int g_bi = g_ai + iter; unsigned int s_ai = 2*thid; unsigned int s_bi = 2*thid + 1; s_ai += CONFLICT_FREE_OFFSET(s_ai); s_bi += CONFLICT_FREE_OFFSET(s_bi); s_data[s_ai] = data[g_ai]; s_data[s_bi] = data[g_bi]; unsigned int stride = 1; for (unsigned int d = blockDim.x; d > 0; d >>= 1) { __syncthreads(); if (thid < d) { unsigned int i = 2*stride*thid; unsigned int ai = i + stride - 1; unsigned int bi = ai + stride; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); s_data[bi] += s_data[ai]; } stride *= 2; } __syncthreads(); data[g_ai] = s_data[s_ai]; data[g_bi] = s_data[s_bi]; } __global__ void scan_inter2_kernel(unsigned int* data, unsigned int iter) { extern __shared__ unsigned int s_data[]; unsigned int thid = threadIdx.x; unsigned int gthid = (blockIdx.x*blockDim.x + threadIdx.x); unsigned int gi = 2*iter*gthid; unsigned int g_ai = gi + iter - 1; unsigned int g_bi = g_ai + iter; unsigned int s_ai = 2*thid; unsigned int s_bi = 2*thid + 1; s_ai += CONFLICT_FREE_OFFSET(s_ai); 
s_bi += CONFLICT_FREE_OFFSET(s_bi); s_data[s_ai] = data[g_ai]; s_data[s_bi] = data[g_bi]; unsigned int stride = blockDim.x*2; for (unsigned int d = 1; d <= blockDim.x; d *= 2) { stride >>= 1; __syncthreads(); if (thid < d) { unsigned int i = 2*stride*thid; unsigned int ai = i + stride - 1; unsigned int bi = ai + stride; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); unsigned int t = s_data[ai]; s_data[ai] = s_data[bi]; s_data[bi] += t; } } __syncthreads(); data[g_ai] = s_data[s_ai]; data[g_bi] = s_data[s_bi]; } __global__ void uniformAdd(unsigned int n, unsigned int *data, unsigned int *inter) { __shared__ unsigned int uni; if (threadIdx.x == 0) { uni = inter[blockIdx.x]; } __syncthreads(); unsigned int g_ai = blockIdx.x*2*blockDim.x + threadIdx.x; unsigned int g_bi = g_ai + blockDim.x; if (g_ai < n) { data[g_ai] += uni; } if (g_bi < n) { data[g_bi] += uni; } } void scanLargeArray( unsigned int gridNumElements, unsigned int* data_d) { unsigned int gridNumElems = gridNumElements; // allocate device memory input and output arrays unsigned int* inter_d = NULL; // Run the prescan unsigned int size = (gridNumElems+BLOCK_SIZE-1)/BLOCK_SIZE; unsigned int dim_block; unsigned int current_max = size*BLOCK_SIZE; for (int block_size = 128; block_size <= BLOCK_SIZE; block_size *= 2){ unsigned int array_size = block_size; while(array_size < size){ array_size *= block_size; } if (array_size <= current_max){ current_max = array_size; dim_block = block_size; } } hipMalloc( (void**) &inter_d, current_max*sizeof(unsigned int)); hipMemset (inter_d, 0, current_max*sizeof(unsigned int)); for (unsigned int i=0; i < (size+GRID_SIZE-1)/GRID_SIZE; i++){ unsigned int gridSize = ((size-(i*GRID_SIZE)) > GRID_SIZE) ? GRID_SIZE : (size-i*GRID_SIZE); unsigned int numElems = ((gridNumElems-(i*GRID_SIZE*BLOCK_SIZE)) > (GRID_SIZE*BLOCK_SIZE)) ? (GRID_SIZE*BLOCK_SIZE) : (gridNumElems-(i*GRID_SIZE*BLOCK_SIZE)); dim3 block (BLOCK_SIZE/2); dim3 grid (gridSize); hipLaunchKernelGGL(( scan_L1_kernel), dim3(grid), dim3(block), 0, 0, numElems, data_d+(i*GRID_SIZE*BLOCK_SIZE), inter_d+(i*GRID_SIZE)); } unsigned int stride = 1; for (unsigned int d = current_max; d > 1; d /= dim_block) { dim3 block (dim_block/2); dim3 grid (d/dim_block); hipLaunchKernelGGL(( scan_inter1_kernel), dim3(grid), dim3(block), EXPANDED_SIZE(dim_block)*sizeof(unsigned int), 0, inter_d, stride); stride *= dim_block; } hipMemset(&(inter_d[current_max-1]), 0, sizeof(unsigned int)); for (unsigned int d = dim_block; d <= current_max; d *= dim_block) { stride /= dim_block; dim3 block (dim_block/2); dim3 grid (d/dim_block); hipLaunchKernelGGL(( scan_inter2_kernel), dim3(grid), dim3(block), EXPANDED_SIZE(dim_block)*sizeof(unsigned int), 0, inter_d, stride); } for (unsigned int i=0; i < (size+GRID_SIZE-1)/GRID_SIZE; i++){ unsigned int gridSize = ((size-(i*GRID_SIZE)) > GRID_SIZE) ? GRID_SIZE : (size-i*GRID_SIZE); unsigned int numElems = ((gridNumElems-(i*GRID_SIZE*BLOCK_SIZE)) > (GRID_SIZE*BLOCK_SIZE)) ? (GRID_SIZE*BLOCK_SIZE) : (gridNumElems-(i*GRID_SIZE*BLOCK_SIZE)); dim3 block (BLOCK_SIZE/2); dim3 grid (gridSize); hipLaunchKernelGGL(( uniformAdd), dim3(grid), dim3(block), 0, 0, numElems, data_d+(i*GRID_SIZE*BLOCK_SIZE), inter_d+(i*GRID_SIZE)); } hipFree(inter_d); }
262c5b66e1e79d91e4128d0ef5ce87d6d64b03c3.cu
/*************************************************************************** * * (C) Copyright 2010 The Board of Trustees of the * University of Illinois * All Rights Reserved * ***************************************************************************/ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #define BLOCK_SIZE 1024 #define GRID_SIZE 65535 #define NUM_BANKS 16 #define LOG_NUM_BANKS 4 #define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS + (index) >> (2*LOG_NUM_BANKS)) #define EXPANDED_SIZE(__x) (__x+(__x>>LOG_NUM_BANKS)+(__x>>(2*LOG_NUM_BANKS))) //////////////////////////////////////////////////////////////////////////////// // Kernels //////////////////////////////////////////////////////////////////////////////// __global__ void scan_L1_kernel(unsigned int n, unsigned int* data, unsigned int* inter) { __shared__ unsigned int s_data[EXPANDED_SIZE(BLOCK_SIZE)]; unsigned int thid = threadIdx.x; unsigned int g_ai = blockIdx.x*2*blockDim.x + threadIdx.x; unsigned int g_bi = g_ai + blockDim.x; unsigned int s_ai = thid; unsigned int s_bi = thid + blockDim.x; s_ai += CONFLICT_FREE_OFFSET(s_ai); s_bi += CONFLICT_FREE_OFFSET(s_bi); s_data[s_ai] = (g_ai < n) ? data[g_ai] : 0; s_data[s_bi] = (g_bi < n) ? data[g_bi] : 0; unsigned int stride = 1; for (unsigned int d = blockDim.x; d > 0; d >>= 1) { __syncthreads(); if (thid < d) { unsigned int i = 2*stride*thid; unsigned int ai = i + stride - 1; unsigned int bi = ai + stride; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); s_data[bi] += s_data[ai]; } stride *= 2; } if (thid == 0){ unsigned int last = blockDim.x*2 -1; last += CONFLICT_FREE_OFFSET(last); inter[blockIdx.x] = s_data[last]; s_data[last] = 0; } for (unsigned int d = 1; d <= blockDim.x; d *= 2) { stride >>= 1; __syncthreads(); if (thid < d) { unsigned int i = 2*stride*thid; unsigned int ai = i + stride - 1; unsigned int bi = ai + stride; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); unsigned int t = s_data[ai]; s_data[ai] = s_data[bi]; s_data[bi] += t; } } __syncthreads(); if (g_ai < n) { data[g_ai] = s_data[s_ai]; } if (g_bi < n) { data[g_bi] = s_data[s_bi]; } } __global__ void scan_inter1_kernel(unsigned int* data, unsigned int iter) { extern __shared__ unsigned int s_data[]; unsigned int thid = threadIdx.x; unsigned int gthid = (blockIdx.x*blockDim.x + threadIdx.x); unsigned int gi = 2*iter*gthid; unsigned int g_ai = gi + iter - 1; unsigned int g_bi = g_ai + iter; unsigned int s_ai = 2*thid; unsigned int s_bi = 2*thid + 1; s_ai += CONFLICT_FREE_OFFSET(s_ai); s_bi += CONFLICT_FREE_OFFSET(s_bi); s_data[s_ai] = data[g_ai]; s_data[s_bi] = data[g_bi]; unsigned int stride = 1; for (unsigned int d = blockDim.x; d > 0; d >>= 1) { __syncthreads(); if (thid < d) { unsigned int i = 2*stride*thid; unsigned int ai = i + stride - 1; unsigned int bi = ai + stride; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); s_data[bi] += s_data[ai]; } stride *= 2; } __syncthreads(); data[g_ai] = s_data[s_ai]; data[g_bi] = s_data[s_bi]; } __global__ void scan_inter2_kernel(unsigned int* data, unsigned int iter) { extern __shared__ unsigned int s_data[]; unsigned int thid = threadIdx.x; unsigned int gthid = (blockIdx.x*blockDim.x + threadIdx.x); unsigned int gi = 2*iter*gthid; unsigned int g_ai = gi + iter - 1; unsigned int g_bi = g_ai + iter; unsigned int s_ai = 2*thid; unsigned int s_bi = 2*thid + 1; s_ai += CONFLICT_FREE_OFFSET(s_ai); s_bi += CONFLICT_FREE_OFFSET(s_bi); s_data[s_ai] = data[g_ai]; s_data[s_bi] = 
data[g_bi]; unsigned int stride = blockDim.x*2; for (unsigned int d = 1; d <= blockDim.x; d *= 2) { stride >>= 1; __syncthreads(); if (thid < d) { unsigned int i = 2*stride*thid; unsigned int ai = i + stride - 1; unsigned int bi = ai + stride; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); unsigned int t = s_data[ai]; s_data[ai] = s_data[bi]; s_data[bi] += t; } } __syncthreads(); data[g_ai] = s_data[s_ai]; data[g_bi] = s_data[s_bi]; } __global__ void uniformAdd(unsigned int n, unsigned int *data, unsigned int *inter) { __shared__ unsigned int uni; if (threadIdx.x == 0) { uni = inter[blockIdx.x]; } __syncthreads(); unsigned int g_ai = blockIdx.x*2*blockDim.x + threadIdx.x; unsigned int g_bi = g_ai + blockDim.x; if (g_ai < n) { data[g_ai] += uni; } if (g_bi < n) { data[g_bi] += uni; } } void scanLargeArray( unsigned int gridNumElements, unsigned int* data_d) { unsigned int gridNumElems = gridNumElements; // allocate device memory input and output arrays unsigned int* inter_d = NULL; // Run the prescan unsigned int size = (gridNumElems+BLOCK_SIZE-1)/BLOCK_SIZE; unsigned int dim_block; unsigned int current_max = size*BLOCK_SIZE; for (int block_size = 128; block_size <= BLOCK_SIZE; block_size *= 2){ unsigned int array_size = block_size; while(array_size < size){ array_size *= block_size; } if (array_size <= current_max){ current_max = array_size; dim_block = block_size; } } cudaMalloc( (void**) &inter_d, current_max*sizeof(unsigned int)); cudaMemset (inter_d, 0, current_max*sizeof(unsigned int)); for (unsigned int i=0; i < (size+GRID_SIZE-1)/GRID_SIZE; i++){ unsigned int gridSize = ((size-(i*GRID_SIZE)) > GRID_SIZE) ? GRID_SIZE : (size-i*GRID_SIZE); unsigned int numElems = ((gridNumElems-(i*GRID_SIZE*BLOCK_SIZE)) > (GRID_SIZE*BLOCK_SIZE)) ? (GRID_SIZE*BLOCK_SIZE) : (gridNumElems-(i*GRID_SIZE*BLOCK_SIZE)); dim3 block (BLOCK_SIZE/2); dim3 grid (gridSize); scan_L1_kernel<<<grid, block>>>(numElems, data_d+(i*GRID_SIZE*BLOCK_SIZE), inter_d+(i*GRID_SIZE)); } unsigned int stride = 1; for (unsigned int d = current_max; d > 1; d /= dim_block) { dim3 block (dim_block/2); dim3 grid (d/dim_block); scan_inter1_kernel<<<grid, block, EXPANDED_SIZE(dim_block)*sizeof(unsigned int)>>>(inter_d, stride); stride *= dim_block; } cudaMemset(&(inter_d[current_max-1]), 0, sizeof(unsigned int)); for (unsigned int d = dim_block; d <= current_max; d *= dim_block) { stride /= dim_block; dim3 block (dim_block/2); dim3 grid (d/dim_block); scan_inter2_kernel<<<grid, block, EXPANDED_SIZE(dim_block)*sizeof(unsigned int)>>>(inter_d, stride); } for (unsigned int i=0; i < (size+GRID_SIZE-1)/GRID_SIZE; i++){ unsigned int gridSize = ((size-(i*GRID_SIZE)) > GRID_SIZE) ? GRID_SIZE : (size-i*GRID_SIZE); unsigned int numElems = ((gridNumElems-(i*GRID_SIZE*BLOCK_SIZE)) > (GRID_SIZE*BLOCK_SIZE)) ? (GRID_SIZE*BLOCK_SIZE) : (gridNumElems-(i*GRID_SIZE*BLOCK_SIZE)); dim3 block (BLOCK_SIZE/2); dim3 grid (gridSize); uniformAdd<<<grid, block>>>(numElems, data_d+(i*GRID_SIZE*BLOCK_SIZE), inter_d+(i*GRID_SIZE)); } cudaFree(inter_d); }
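scanLargeArray() performs an exclusive prefix sum in place on a device array (each block is scanned with a work-efficient up-sweep/down-sweep, block totals are scanned hierarchically in inter_d, then uniformAdd folds them back in). The driver below is a minimal sketch that assumes it is simply compiled and linked against the file above; scanning an all-ones array should yield 0, 1, 2, ...

// Minimal driver for scanLargeArray().
#include <cstdio>
#include <cuda_runtime.h>

void scanLargeArray(unsigned int gridNumElements, unsigned int *data_d);

int main() {
  const unsigned int n = 1u << 20;
  unsigned int *h = new unsigned int[n];
  for (unsigned int i = 0; i < n; i++) h[i] = 1u;

  unsigned int *d = NULL;
  cudaMalloc((void **)&d, n * sizeof(unsigned int));
  cudaMemcpy(d, h, n * sizeof(unsigned int), cudaMemcpyHostToDevice);

  scanLargeArray(n, d);                       // exclusive prefix sum, in place

  cudaMemcpy(h, d, n * sizeof(unsigned int), cudaMemcpyDeviceToHost);
  printf("h[0] = %u, h[12345] = %u (expected 0 and 12345)\n", h[0], h[12345]);

  cudaFree(d);
  delete[] h;
  return 0;
}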
4a860d58d97dc27daeebdc6adcb55bd1b0e7a244.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include "include/BC.cuh" #include "include/utils.cuh" #include "../include/macros.h" __device__ void OBC(prec* localf, const prec* __restrict__ f, int i, int j, int Lx, int Ly){ localf[9*i+j] = f[IDXcm(i, j, Lx, Ly)]; } __device__ void BBBC(prec* localf, int j){ int op[] = {3,4,1,2,7,8,5,6}; localf[j] = localf[op[j-1]]; } __device__ void SBC(prec* localf, int j, unsigned char b1, unsigned char b2){ int op[] = {3,4,1,2,7,8,5,6}; if(j < 5) localf[j] = localf[op[j-1]]; else{ int right[] = {5,6,7,4}; int left[] = {7,4,5,6}; int index = j-5; if (((b1>>(j-1) == b1>>(right[index])) && (b2>>(j-1) == b2>>(right[index]))) && ((b1>>(j-1) != b1>>(left[index] )) || (b2>>(j-1) != b2>>(left[index] )))) localf[j] = localf[left[index]+1]; else if (((b1>>(j-1) == b1>>(left[index] )) && (b2>>(j-1) == b2>>(left[index] ))) && ((b1>>(j-1) != b1>>(right[index])) || (b2>>(j-1) != b2>>(right[index])))) localf[j] = localf[right[index]+1]; else localf[j] = localf[op[j-1]]; } } __device__ void PBC(prec* localf, const prec* __restrict__ f, int i, int j, int Lx, int Ly, int* ex, int* ey){ int y = i/Lx; int x = i - y * Lx; int xop = (Lx + x - ex[j])%Lx; int yop = (Ly + y - ey[j])%Ly; int iop = xop + yop * Lx; localf[j] = f[IDXcm(iop, j, Lx, Ly)]; }
4a860d58d97dc27daeebdc6adcb55bd1b0e7a244.cu
#include <cuda_runtime.h> #include "include/BC.cuh" #include "include/utils.cuh" #include "../include/macros.h" __device__ void OBC(prec* localf, const prec* __restrict__ f, int i, int j, int Lx, int Ly){ localf[9*i+j] = f[IDXcm(i, j, Lx, Ly)]; } __device__ void BBBC(prec* localf, int j){ int op[] = {3,4,1,2,7,8,5,6}; localf[j] = localf[op[j-1]]; } __device__ void SBC(prec* localf, int j, unsigned char b1, unsigned char b2){ int op[] = {3,4,1,2,7,8,5,6}; if(j < 5) localf[j] = localf[op[j-1]]; else{ int right[] = {5,6,7,4}; int left[] = {7,4,5,6}; int index = j-5; if (((b1>>(j-1) == b1>>(right[index])) && (b2>>(j-1) == b2>>(right[index]))) && ((b1>>(j-1) != b1>>(left[index] )) || (b2>>(j-1) != b2>>(left[index] )))) localf[j] = localf[left[index]+1]; else if (((b1>>(j-1) == b1>>(left[index] )) && (b2>>(j-1) == b2>>(left[index] ))) && ((b1>>(j-1) != b1>>(right[index])) || (b2>>(j-1) != b2>>(right[index])))) localf[j] = localf[right[index]+1]; else localf[j] = localf[op[j-1]]; } } __device__ void PBC(prec* localf, const prec* __restrict__ f, int i, int j, int Lx, int Ly, int* ex, int* ey){ int y = i/Lx; int x = i - y * Lx; int xop = (Lx + x - ex[j])%Lx; int yop = (Ly + y - ey[j])%Ly; int iop = xop + yop * Lx; localf[j] = f[IDXcm(iop, j, Lx, Ly)]; }
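These are per-direction boundary-condition helpers for what looks like a D2Q9 lattice-Boltzmann solver (plain copy, bounce-back, slip, and periodic boundaries). The self-contained sketch below only mirrors the opposite-direction table used by BBBC/SBC (1<->3, 2<->4, 5<->7, 6<->8); it assumes prec is a float-like scalar and writes to a separate buffer so all directions can be handled concurrently. It is an illustration, not part of the project sources.

#include <cstdio>
#include <cuda_runtime.h>

typedef float prec;   // assumption: "prec" from include/macros.h is a float-like scalar

// Mirrors the opposite-direction assignment done by BBBC above.
__global__ void bounce_back_demo(const prec *f_in, prec *f_out) {
  const int op[] = {3, 4, 1, 2, 7, 8, 5, 6};   // D2Q9 opposites: 1<->3, 2<->4, 5<->7, 6<->8
  int j = threadIdx.x + 1;                     // directions 1..8 (0 is the rest population)
  if (j <= 8) f_out[j] = f_in[op[j - 1]];
  if (j == 1) f_out[0] = f_in[0];              // rest population is left unchanged
}

int main() {
  prec h_in[9], h_out[9];
  for (int k = 0; k < 9; k++) h_in[k] = (prec)k;   // tag each direction with its index
  prec *d_in, *d_out;
  cudaMalloc(&d_in, 9 * sizeof(prec));
  cudaMalloc(&d_out, 9 * sizeof(prec));
  cudaMemcpy(d_in, h_in, 9 * sizeof(prec), cudaMemcpyHostToDevice);
  bounce_back_demo<<<1, 8>>>(d_in, d_out);
  cudaMemcpy(h_out, d_out, 9 * sizeof(prec), cudaMemcpyDeviceToHost);
  for (int j = 1; j <= 8; j++)
    printf("direction %d now holds old direction %d\n", j, (int)h_out[j]);
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}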
542c1bfa501454c938bb997a54276c5a20660ab0.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <thrust/device_vector.h> #include <thrust/device_ptr.h> #include <thrust/host_vector.h> #include <thrust/random.h> #include <thrust/transform.h> #include <thrust/functional.h> #include <thrust/execution_policy.h> #include <hip/hip_runtime.h> #include <assert.h> #include <omp.h> #define KEPLER 0 #include "ErrorCheck.h" #include "include/encode.cuh" #include "include/decode.cuh" #include "zfparray3.h" using namespace thrust; using namespace std; #define index(x, y, z) ((x) + 4 * ((y) + 4 * (z))) const size_t nx = 512; const size_t ny = 512; const size_t nz = 512; //BSIZE is the length of the array in class Bit //It's tied to MAXBITS such that //MAXBITS = sizeof(Word) * BSIZE //which is really //MAXBITS = wsize * BSIZE //e.g. if we match bits one-to-one, double -> unsigned long long // then BSIZE = 64 and MAXPBITS = 4096 #define BSIZE 16 uint minbits = 1024; uint MAXBITS = 1024; uint MAXPREC = 64; int MINEXP = -1074; const double rate = 16; size_t blksize = 0; unsigned long long group_count = 0x46acca631ull; uint size = 64; int EBITS = 11; /* number of exponent bits */ const int EBIAS = 1023; const int intprec = 64; static const unsigned char perm[64] = { index(0, 0, 0), // 0 : 0 index(1, 0, 0), // 1 : 1 index(0, 1, 0), // 2 : 1 index(0, 0, 1), // 3 : 1 index(0, 1, 1), // 4 : 2 index(1, 0, 1), // 5 : 2 index(1, 1, 0), // 6 : 2 index(2, 0, 0), // 7 : 2 index(0, 2, 0), // 8 : 2 index(0, 0, 2), // 9 : 2 index(1, 1, 1), // 10 : 3 index(2, 1, 0), // 11 : 3 index(2, 0, 1), // 12 : 3 index(0, 2, 1), // 13 : 3 index(1, 2, 0), // 14 : 3 index(1, 0, 2), // 15 : 3 index(0, 1, 2), // 16 : 3 index(3, 0, 0), // 17 : 3 index(0, 3, 0), // 18 : 3 index(0, 0, 3), // 19 : 3 index(2, 1, 1), // 20 : 4 index(1, 2, 1), // 21 : 4 index(1, 1, 2), // 22 : 4 index(0, 2, 2), // 23 : 4 index(2, 0, 2), // 24 : 4 index(2, 2, 0), // 25 : 4 index(3, 1, 0), // 26 : 4 index(3, 0, 1), // 27 : 4 index(0, 3, 1), // 28 : 4 index(1, 3, 0), // 29 : 4 index(1, 0, 3), // 30 : 4 index(0, 1, 3), // 31 : 4 index(1, 2, 2), // 32 : 5 index(2, 1, 2), // 33 : 5 index(2, 2, 1), // 34 : 5 index(3, 1, 1), // 35 : 5 index(1, 3, 1), // 36 : 5 index(1, 1, 3), // 37 : 5 index(3, 2, 0), // 38 : 5 index(3, 0, 2), // 39 : 5 index(0, 3, 2), // 40 : 5 index(2, 3, 0), // 41 : 5 index(2, 0, 3), // 42 : 5 index(0, 2, 3), // 43 : 5 index(2, 2, 2), // 44 : 6 index(3, 2, 1), // 45 : 6 index(3, 1, 2), // 46 : 6 index(1, 3, 2), // 47 : 6 index(2, 3, 1), // 48 : 6 index(2, 1, 3), // 49 : 6 index(1, 2, 3), // 50 : 6 index(0, 3, 3), // 51 : 6 index(3, 0, 3), // 52 : 6 index(3, 3, 0), // 53 : 6 index(3, 2, 2), // 54 : 7 index(2, 3, 2), // 55 : 7 index(2, 2, 3), // 56 : 7 index(1, 3, 3), // 57 : 7 index(3, 1, 3), // 58 : 7 index(3, 3, 1), // 59 : 7 index(2, 3, 3), // 60 : 8 index(3, 2, 3), // 61 : 8 index(3, 3, 2), // 62 : 8 index(3, 3, 3), // 63 : 9 }; static size_t block_size(double rate) { return (lrint(64 * rate) + CHAR_BIT - 1) / CHAR_BIT; } template<class Scalar> void setupConst(const unsigned char *perm, uint maxbits_, uint maxprec_, int minexp_, int ebits_, int ebias_ ) { ErrorCheck ec; ec.chk("setupConst start"); hipMemcpyToSymbol(c_perm, perm, sizeof(unsigned char) * 64, 0); ec.chk("setupConst: c_perm"); hipMemcpyToSymbol(c_maxbits, &MAXBITS, sizeof(uint)); ec.chk("setupConst: c_maxbits"); const uint sizeof_scalar = sizeof(Scalar); hipMemcpyToSymbol(c_sizeof_scalar, &sizeof_scalar, sizeof(uint)); ec.chk("setupConst: c_sizeof_scalar"); hipMemcpyToSymbol(c_maxprec, 
&maxprec_, sizeof(uint)); ec.chk("setupConst: c_maxprec"); hipMemcpyToSymbol(c_minexp, &minexp_, sizeof(int)); ec.chk("setupConst: c_minexp"); hipMemcpyToSymbol(c_ebits, &ebits_, sizeof(int)); ec.chk("setupConst: c_ebits"); hipMemcpyToSymbol(c_ebias, &ebias_, sizeof(int)); ec.chk("setupConst: c_ebias"); ec.chk("setupConst finished"); } //Used to generate rand array in CUDA with Thrust struct RandGen { RandGen() {} __device__ float operator () (const uint idx) { thrust::default_random_engine randEng; thrust::uniform_real_distribution<float> uniDist(0.0, 0.0001); randEng.discard(idx); return uniDist(randEng); } }; // forward decorrelating transform template<class Int> __device__ __host__ static void fwd_xform_zy(Int* p) { for (uint z = 0; z < 4; z++) for (uint y = 4; y-- > 0;) cuZFP::fwd_lift<Int, 1>(p + 4 * y + 16 * z); } // forward decorrelating transform template<class Int> __device__ __host__ static void fwd_xform_xz(Int* p) { for (uint x = 4; x-- > 0;) for (uint z = 4; z-- > 0;) cuZFP::fwd_lift<Int, 4>(p + 16 * z + 1 * x); } // forward decorrelating transform template<class Int> __host__ static void fwd_xform_yx(Int* p) { for (uint y = 4; y-- > 0;) for (uint x = 4; x-- > 0;) cuZFP::fwd_lift<Int, 16>(p + 1 * x + 4 * y); } // forward decorrelating transform template<class Int> __host__ static void fwd_xform(Int* p) { fwd_xform_zy(p); fwd_xform_xz(p); fwd_xform_yx(p); } template<class Int, class UInt, class Scalar, uint bsize> void cpuEncode ( dim3 gridDim, dim3 blockDim, const unsigned long long count, uint size, const Scalar* data, const unsigned char *g_cnt, Word *block ) { dim3 blockIdx; for (blockIdx.z = 0; blockIdx.z < gridDim.z; blockIdx.z++){ for (blockIdx.y = 0; blockIdx.y < gridDim.y; blockIdx.y++){ for (blockIdx.x = 0; blockIdx.x <gridDim.x; blockIdx.x++){ Int sh_q[64]; UInt sh_p[64]; uint sh_m[64], sh_n[64]; Bitter sh_bitters[64]; unsigned char sh_sbits[64]; uint mx = blockIdx.x, my = blockIdx.y, mz = blockIdx.z; mx *= 4; my *= 4; mz *= 4; int emax = cuZFP::max_exp_block(data, mx, my, mz, 1, blockDim.x * gridDim.x, gridDim.x * gridDim.y * blockDim.x * blockDim.y); // uint sz = gridDim.x*blockDim.x * 4 * gridDim.y*blockDim.y * 4; // uint sy = gridDim.x*blockDim.x * 4; // uint sx = 1; cuZFP::fixed_point_block<Int, Scalar, intprec>(sh_q, data, emax, mx, my, mz, 1, blockDim.x * gridDim.x, gridDim.x * gridDim.y * blockDim.x * blockDim.y); fwd_xform(sh_q); //fwd_order for (int i = 0; i < 64; i++){ sh_p[i] = cuZFP::int2uint<Int, UInt>(sh_q[perm[i]]); } uint bidx = (blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.y * gridDim.x); //cuZFP::Bit<bsize> stream(block + bidx*bsize); unsigned long long x[64], y[64]; Bitter bitter[64]; for (int i = 0; i < 64; i++){ bitter[i] = make_bitter(0, 0); } uint s_emax_bits[1]; s_emax_bits[0] = 1; //maxprec, minexp, EBITS // uint k = threadIdx.x + blockDim.x * blockIdx.x; int maxprec = cuZFP::precision(emax, MAXPREC, MINEXP); int ebits = EBITS + 1; const uint kmin = intprec > maxprec ? intprec - maxprec : 0; uint e = maxprec ? emax + EBIAS : 0; //printf("%d %d %d %d\n", emax, maxprec, ebits, e); if (e){ //write_bitters(bitter[0], make_bitter(2 * e + 1, 0), ebits, sbit[0]); block[bidx*bsize] = 2 * e + 1; //stream[bidx].write_bits(2 * e + 1, ebits); s_emax_bits[0] = ebits; } // const uint kmin = intprec > MAXPREC ? 
intprec - MAXPREC : 0; //unsigned long long x[64]; #pragma omp parallel for for (int tid = 0; tid<64; tid++){ /* step 1: extract bit plane #k to x */ x[tid] = 0; for (int i = 0; i < size; i++) x[tid] += (uint64)((sh_p[i] >> tid) & 1u) << i; y[tid] = x[tid]; } #pragma omp parallel for for (int tid = 0; tid < 64; tid++){ sh_m[tid] = 0; sh_n[tid] = 0; sh_sbits[tid] = 0; } #pragma omp parallel for for (int tid = 0; tid < 64; tid++){ //get the index of the first 'one' in the bit plane for (int i = 0; i < 64; i++){ if (!!(x[tid] >> i)) sh_n[tid] = i + 1; } } for (int i = 0; i < 63; i++){ sh_m[i] = sh_n[i + 1]; } //make sure that m increases isotropically for (int i = intprec - 1; i-- > 0;){ if (sh_m[i] < sh_m[i + 1]) sh_m[i] = sh_m[i + 1]; } //compute the number of bits used per thread int bits[64]; #pragma omp parallel for for (int tid = 0; tid < 64; tid++) { bits[tid] = 128; int n = 0; /* step 2: encode first n bits of bit plane */ bits[tid] -= sh_m[tid]; x[tid] >>= sh_m[tid]; x[tid] = (sh_m[tid] != 64) * x[tid]; n = sh_m[tid]; /* step 3: unary run-length encode remainder of bit plane */ for (; n < size && bits[tid] && (bits[tid]--, !!x[tid]); x[tid] >>= 1, n++) for (; n < size - 1 && bits[tid] && (bits[tid]--, !(x[tid] & 1u)); x[tid] >>= 1, n++) ; } //number of bits read per thread //#pragma omp parallel for for (int tid = 0; tid < 64; tid++){ bits[tid] = (128 - bits[tid]); } #pragma omp parallel for for (int tid = 0; tid < 64; tid++){ sh_n[tid] = min(sh_m[tid], bits[tid]); } #pragma omp parallel for for (int tid = 0; tid < 64; tid++) { /* step 2: encode first n bits of bit plane */ unsigned char sbits = 0; //y[tid] = stream[bidx].write_bits(y[tid], sh_m[tid]); y[tid] = write_bitters(bitter[tid], make_bitter(y[tid], 0), sh_m[tid], sbits); uint n = sh_n[tid]; /* step 3: unary run-length encode remainder of bit plane */ for (; n < size && bits[tid] && (bits[tid]-- && write_bitter(bitter[tid], !!y[tid], sbits)); y[tid] >>= 1, n++) for (; n < size - 1 && bits[tid] && (bits[tid]-- && !write_bitter(bitter[tid], y[tid] & 1u, sbits)); y[tid] >>= 1, n++) ; sh_bitters[63 - tid] = bitter[tid]; sh_sbits[63 - tid] = sbits; } uint rem_sbits = s_emax_bits[0]; uint tot_sbits = s_emax_bits[0]; uint offset = 0; for (int i = 0; i < intprec && tot_sbits < MAXBITS; i++){ if (sh_sbits[i] <= 64){ write_outx<bsize>(sh_bitters, block + bidx*bsize, rem_sbits, tot_sbits, offset, i, sh_sbits[i]); } else{ write_outx<bsize>(sh_bitters, block + bidx*bsize, rem_sbits, tot_sbits, offset, i, 64); if (tot_sbits < MAXBITS) write_outy<bsize>(sh_bitters, block + bidx*bsize, rem_sbits, tot_sbits, offset, i, sh_sbits[i] - 64); } } } } } } template<class Int, class UInt, class Scalar, uint bsize, uint num_sidx> void cpuDecode ( dim3 gridDim, dim3 blockDim, size_t *sidx, Word *block, Scalar *out, const unsigned long long orig_count ) { dim3 blockIdx; for (blockIdx.z = 0; blockIdx.z < gridDim.z; blockIdx.z++){ for (blockIdx.y = 0; blockIdx.y < gridDim.y; blockIdx.y++){ for (blockIdx.x = 0; blockIdx.x < gridDim.x; blockIdx.x++){ uint idx = (blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.y * gridDim.x); uint bdim = blockDim.x*blockDim.y*blockDim.z; uint bidx = idx*bdim; cuZFP::Bit<bsize> stream; size_t s_sidx[64];// = (size_t*)&smem[0]; //if (tid < num_sidx) for (int tid = 0; tid < num_sidx; tid++){ s_sidx[tid] = sidx[tid]; } uint s_idx_n[64];// = (uint*)&smem[s_sidx[0]]; uint s_idx_g[64];// = (uint*)&smem[s_sidx[1]]; unsigned long long s_bit_cnt[64];// = (unsigned long long*)&smem[s_sidx[2]]; uint s_bit_rmn_bits[64];// = 
(uint*)&smem[s_sidx[3]]; char s_bit_offset[64];// = (char*)&smem[s_sidx[4]]; uint s_bit_bits[64];// = (uint*)&smem[s_sidx[5]]; Word s_bit_buffer[64];// = (Word*)&smem[s_sidx[6]]; UInt s_data[64];// = (UInt*)&smem[s_sidx[7]]; Int s_q[64]; uint s_kmin[1]; int s_emax[1]; //stream[idx].rewind(); stream.read_bit(); uint ebits = EBITS + 1; s_emax[0] = stream.read_bits(ebits - 1) - EBIAS; int maxprec = cuZFP::precision(s_emax[0], MAXPREC, MINEXP); s_kmin[0] = intprec > maxprec ? intprec - maxprec : 0; for (int tid = 0; tid < size; tid++) s_data[tid] = 0; uint bits = MAXBITS - ebits; unsigned long long x[64]; int *sh_idx = new int[bsize*64]; int *sh_tmp_idx = new int[bsize * 64]; for (int tid = 0; tid < 64; tid++){ for (int i = 0; i < 16; i++){ sh_idx[i * 64 + tid] = -1; sh_tmp_idx[i * 64 + tid] = -1; } } int sh_cnt[bsize]; int beg_idx[bsize]; for (int tid = 0; tid < 64; tid++){ if (tid < bsize){ beg_idx[tid] = 0; if (tid == 0) beg_idx[tid] = ebits; sh_cnt[tid] = 0; for (int i = beg_idx[tid]; i < 64; i++){ if ((stream.begin[tid] >> i) & 1u){ sh_tmp_idx[tid * 64 + sh_cnt[tid]++] = tid*64 + i; } } } } //fix blocks since they are off by ebits for (int i = 0; i < bsize; i++){ for (int tid = 0; tid < 64; tid++){ if (tid < sh_cnt[i]){ sh_tmp_idx[i*64 + tid] -= ebits; } } } for (int tid = 0; tid < 64; tid++){ if (tid < sh_cnt[0]) sh_idx[tid] = sh_tmp_idx[tid]; } for (int i = 1; i < bsize; i++){ for (int tid = 0; tid < 64; tid++){ if (tid == 0) sh_cnt[i] += sh_cnt[i - 1]; if (tid < sh_cnt[i]){ sh_idx[sh_cnt[i - 1] + tid] = sh_tmp_idx[i * 64 + tid]; } } } /* decode one bit plane at a time from MSB to LSB */ int cnt = 0; //uint new_n = 0; uint bits_cnt = ebits; for (uint tid = intprec, n = 0; bits && tid-- > s_kmin[0];) { /* decode first n bits of bit plane #k */ uint m = MIN(n, bits); bits -= m; bits_cnt += m; x[tid] = stream.read_bits(m); /* unary run-length decode remainder of bit plane */ for (; n < size && bits && (bits--, bits_cnt++, stream.read_bit()); x[tid] += (uint64)1 << n++){ int num_bits = 0; uint chk = 0; //uint tmp_bits = stream[idx].bits; //Word tmp_buffer = stream[idx].buffer; //char tmp_offset = stream[idx].offset; //for (; n < size - 1 && bits && (bits--, !stream[idx].read_bit()); n++) // ; //stream[idx].bits = tmp_bits; //stream[idx].buffer = tmp_buffer; //stream[idx].offset = tmp_offset; while (n < size - 1 && bits && (bits--, bits_cnt++, !stream.read_bit())){ //the number of bits read in one go: //this can be affected by running out of bits in the block (variable bits) // and how much is encoded per number (variable n) // and how many zeros there are since the last one bit. 
// Finally, the last bit isn't read because we'll check it to see // where we are /* fast forward to the next one bit that hasn't been read yet*/ while (sh_idx[cnt] < bits_cnt - ebits){ cnt++; } cnt--; //compute the raw number of bits between the last one bit and the current one bit num_bits = sh_idx[cnt + 1] - sh_idx[cnt]; //the one bit as two positions previous num_bits -= 2; num_bits = min(num_bits, (size - 1) - n - 1); bits_cnt += num_bits; if (num_bits > 0){ stream.read_bits(num_bits); bits -= num_bits; n += num_bits; } n++; } //if (n != new_n || new_bits != bits){ // cout << n << " " << new_n << " " << bits << " " << new_bits << " " << blockIdx.x * gridDim.x << " " << blockIdx.y*gridDim.y << " " << blockIdx.z * gridDim.z << endl; // exit(0); //} } /* deposit bit plane from x */ for (int i = 0; x[tid]; i++, x[tid] >>= 1) s_data[i] += (UInt)(x[tid] & 1u) << tid; } for (int tid = 0; tid < 64; tid++){ s_q[perm[tid]] = cuZFP::uint2int<Int, UInt>(s_data[tid]); } uint mx = blockIdx.x, my = blockIdx.y, mz = blockIdx.z; mx *= 4; my *= 4; mz *= 4; cuZFP::inv_xform(s_q); cuZFP::inv_cast<Int, Scalar>(s_q, out, s_emax[0], mx, my, mz, 1, gridDim.x*blockDim.x, gridDim.x*blockDim.x * gridDim.y*blockDim.y); } } } } template<class Int, class UInt, class Scalar, uint bsize> void gpuTest ( host_vector<Scalar> &h_data ) { host_vector<int> h_emax; host_vector<UInt> h_p; host_vector<Int> h_q; host_vector<UInt> h_buf(nx*ny*nz); host_vector<cuZFP::Bit<bsize> > h_bits; device_vector<unsigned char> d_g_cnt; device_vector<Scalar> data; data = h_data; dim3 emax_size(nx / 4, ny / 4, nz / 4); dim3 block_size(8, 8, 8); dim3 grid_size = emax_size; grid_size.x /= block_size.x; grid_size.y /= block_size.y; grid_size.z /= block_size.z; //const uint kmin = intprec > maxprec ? intprec - maxprec : 0; ErrorCheck ec; hipEvent_t start, stop; float millisecs; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); device_vector<Word > block(emax_size.x * emax_size.y * emax_size.z * bsize); cuZFP::encode<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, data, block, group_count, size); hipStreamSynchronize(0); ec.chk("cudaEncode"); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&millisecs, start, stop); ec.chk("cudaencode"); cout << "encode GPU in time: " << millisecs/1000.0 << endl; thrust::host_vector<Word > cpu_block; cpu_block = block; UInt sum = 0; for (int i = 0; i < cpu_block.size(); i++){ sum += cpu_block[i]; } cout << "encode UInt sum: " << sum << endl; hipMemset(thrust::raw_pointer_cast(data.data()), 0, sizeof(Scalar)*data.size()); hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, block, data, group_count); ec.chk("cudaDecode"); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&millisecs, start, stop); cout << "decode parallel GPU in time: " << millisecs/1000.0 << endl; double tot_sum = 0, max_diff = 0, min_diff = 1e16; host_vector<Scalar> h_out = data; for (int i = 0; i < h_data.size(); i++){ int k = 0, j = 0; frexp(h_data[i], &j); frexp(h_out[i], &k); //if (abs(j - k) > 1){ // cout << i << " " << j << " " << k << " " << h_data[i] << " " << h_out[i] << endl; // //exit(-1); //} double diff = fabs(h_data[i] - h_out[i]); //if (diff > 1 ) // cout << i << " " << j << " " << k << " " << h_data[i] << " " << h_out[i] << endl; if (max_diff < diff) max_diff = diff; if (min_diff > diff) min_diff = diff; tot_sum += diff; } cout << "tot diff: " << tot_sum << " average diff: " << 
tot_sum / (float)h_data.size() << " max diff: " << max_diff << " min diff: " << min_diff << endl; cout << "sum: " << thrust::reduce(h_data.begin(), h_data.end()) << " " << thrust::reduce(h_out.begin(), h_out.end()) << endl << endl; //gpuValidate<Int, UInt, Scalar, bsize>(h_data, q, data); } template<class Int, class UInt, class Scalar, uint bsize> void cpuTestBitStream ( host_vector<Scalar> &h_data ) { host_vector<int> h_emax; host_vector<UInt> h_p; host_vector<Int> h_q; host_vector<UInt> h_buf(nx*ny*nz); host_vector<Word > h_bits; dim3 emax_size(nx / 4, ny / 4, nz / 4); dim3 block_size(8, 8, 8); dim3 grid_size = emax_size; grid_size.x /= block_size.x; grid_size.y /= block_size.y; grid_size.z /= block_size.z; //const uint kmin = intprec > maxprec ? intprec - maxprec : 0; host_vector<Word > cpu_block(emax_size.x * emax_size.y * emax_size.z * bsize); block_size = dim3(4, 4, 4); grid_size = dim3(nx, ny, nz); grid_size.x /= block_size.x; grid_size.y /= block_size.y; grid_size.z /= block_size.z; unsigned long long count = group_count; host_vector<unsigned char> g_cnt(10); uint sum = 0; g_cnt[0] = 0; for (int i = 1; i < 10; i++){ sum += count & 0xf; g_cnt[i] = sum; count >>= 4; } cpuEncode<Int, UInt, Scalar, bsize>( grid_size, block_size, group_count, size, thrust::raw_pointer_cast(h_data.data()), thrust::raw_pointer_cast(g_cnt.data()), thrust::raw_pointer_cast(cpu_block.data())); unsigned long long block_sum = 0; for (int i = 0; i < cpu_block.size(); i++){ block_sum += cpu_block[i]; } cout << "encode UInt sum: " << block_sum << endl; host_vector<Scalar> h_out(nx*ny* nz); block_size = dim3(4, 4, 4); grid_size = dim3(nx, ny, nz); grid_size.x /= block_size.x; grid_size.y /= block_size.y; grid_size.z /= block_size.z; size_t blcksize = block_size.x *block_size.y * block_size.z; size_t s_idx[12] = { sizeof(size_t) * 12, blcksize * sizeof(uint), blcksize * sizeof(uint), +blcksize * sizeof(unsigned long long), blcksize * sizeof(uint), blcksize * sizeof(char), blcksize * sizeof(uint), blcksize * sizeof(Word), blcksize * sizeof(UInt), blcksize * sizeof(Int), sizeof(uint), sizeof(int) }; thrust::inclusive_scan(s_idx, s_idx + 11, s_idx); const size_t shmem_size = thrust::reduce(s_idx, s_idx + 11); cpuDecode < Int, UInt, Scalar, bsize, 9 > (grid_size, block_size, s_idx, raw_pointer_cast(cpu_block.data()), raw_pointer_cast(h_out.data()), group_count); double tot_sum = 0, max_diff = 0, min_diff = 1e16; for (int i = 0; i < h_data.size(); i++){ int k = 0, j = 0; frexp(h_data[i], &j); frexp(h_out[i], &k); //if (abs(j - k) > 1){ // cout << i << " " << j << " " << k << " " << h_data[i] << " " << h_out[i] << endl; // //exit(-1); //} double diff = fabs(h_data[i] - h_out[i]); //if (diff > 1) // cout << i << " " << j << " " << k << " " << h_data[i] << " " << h_out[i] << endl; if (max_diff < diff) max_diff = diff; if (min_diff > diff) min_diff = diff; tot_sum += diff; } cout << "tot diff: " << tot_sum << " average diff: " << tot_sum / (float)h_data.size() << " max diff: " << max_diff << " min diff: " << min_diff << endl; cout << "sum: " << thrust::reduce(h_data.begin(), h_data.end()) << " " << thrust::reduce(h_out.begin(), h_out.end()) << endl; //gpuValidate<Int, UInt, Scalar, bsize>(h_data, q, data); } template<typename Scalar> void zfpTest ( host_vector<Scalar> &h_data ) { double start_time = omp_get_wtime(); #if 0 //doing the compression with sextuple nested for loop is actually //faster than the array3d way. 
zfp_field* field = zfp_field_alloc(); zfp_field_set_type(field, zfp::codec<Scalar>::type); zfp_field_set_pointer(field, thrust::raw_pointer_cast(h_data.data())); zfp_field_set_size_3d(field, nx, ny, nz); zfp_stream* stream = zfp_stream_open(0); uint n = zfp_field_size(field, NULL); uint dims = zfp_field_dimensionality(field); zfp_type type = zfp_field_type(field); Scalar new_rate = zfp_stream_set_rate(stream, rate, type, dims, 0); size_t bufsize = zfp_stream_maximum_size(stream, field); uchar* buffer = new uchar[bufsize]; bitstream* s = stream_open(buffer, bufsize); zfp_stream_set_bit_stream(stream, s); zfp_stream_rewind(stream); int m = 0; for (int z = 0; z < nz; z += 4){ for (int y = 0; y < ny; y += 4){ for (int x = 0; x < nx; x += 4){ Scalar b[64]; m = 0; for (int i = 0; i < 4; i++){ for (int j = 0; j < 4; j++){ for (int k = 0; k < 4; k++, m++){ b[m] = h_data[(z + i)*nx*ny + (y + j)*nx + x + k]; } } } zfp_encode_block_double_3(stream, b); } } } Scalar time = omp_get_wtime() - start_time; cout << "encode time: " << time << endl; //cout << "sum UInt " << thrust::reduce(stream->begin, stream->end) << endl; stream_flush(s); host_vector<Scalar> h_out(nx*ny*nz); stream_rewind(s); start_time = omp_get_wtime(); for (int z = 0; z < nz; z += 4){ for (int y = 0; y < ny; y += 4){ for (int x = 0; x < nx; x += 4){ m = 0; Scalar b[64]; zfp_decode_block_double_3(stream, b); for (int i = 0; i < 4; i++){ for (int j = 0; j < 4; j++){ for (int k = 0; k < 4; k++, m++){ h_out[(z + i)*nx*ny + (y + j)*nx + x + k] = b[m]; } } } } } } #else zfp::array3d u(nx, ny, nz, rate); //this isn't any faster than straight 0 to n-1 //for (int z = 0; z < nz; z++){ // for (int y = 0; y < nz; y++){ // for (int x = 0; x < nx; x++){ // u[x + y * nx + z * nx * ny] = h_data[x + y * nx + z * nx * ny]; // } // } //} for (int i = 0; i < nx*ny*nz; i++){ u[i] = h_data[i]; } double time = omp_get_wtime() - start_time; cout << "encode time: " << time << endl; start_time = omp_get_wtime(); host_vector<Scalar> h_out(nx*ny*nz); for (int i = 0; i < nx*ny*nz; i++){ h_out[i] = u[i]; } time = omp_get_wtime() - start_time; cout << "decode time: " << time << endl; #endif Scalar tot_diff = 0; for (int i = 0; i < nx*ny*nz; i++){ Scalar diff = fabs(h_data[i] - h_out[i]); tot_diff += diff; } cout << "tot diff: " << tot_diff << " average diff: " << tot_diff / (float)h_out.size() << endl;// " max diff: " << max_diff << " min diff: " << min_diff << endl; cout << "sum : " << thrust::reduce(h_data.begin(), h_data.end()) << " " << thrust::reduce(h_out.begin(), h_out.end()) << endl; } template<class Int, class UInt, class Scalar, uint bsize> void mixedTestCPUtoGPU ( host_vector<Scalar> &h_data ) { dim3 emax_size(nx / 4, ny / 4, nz / 4); dim3 block_size(8, 8, 8); dim3 grid_size = emax_size; grid_size.x /= block_size.x; grid_size.y /= block_size.y; grid_size.z /= block_size.z; //const uint kmin = intprec > maxprec ? 
intprec - maxprec : 0; ErrorCheck ec; hipEvent_t start, stop; float millisecs; double start_time = omp_get_wtime(); zfp::array3d u(nx, ny, nz, rate); for (int i = 0; i < nx*ny*nz; i++){ u[i] = h_data[i]; } double time = omp_get_wtime() - start_time; u.flush_cache(); cout << "encode time: " << time << endl; unsigned char * cd = u.compressed_data(); hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); device_vector<Word > block(emax_size.x * emax_size.y * emax_size.z * bsize); host_vector<Word> h_block(emax_size.x * emax_size.y * emax_size.z * bsize); memcpy(h_block.data(), cd, u.compressed_size()); block = h_block; device_vector<Scalar> data(nx*ny*nz, 0.0); cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, block, data, group_count); ec.chk("cudaDecode"); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&millisecs, start, stop); cout << "decode parallel GPU in time: " << millisecs / 1000.0 << endl; double tot_sum = 0, max_diff = 0, min_diff = 1e16; host_vector<Scalar> h_out = data; for (int i = 0; i < h_data.size(); i++){ int k = 0, j = 0; frexp(h_data[i], &j); frexp(h_out[i], &k); //if (abs(j - k) > 1){ // cout << i << " " << j << " " << k << " " << h_data[i] << " " << h_out[i] << endl; // //exit(-1); //} double diff = fabs(h_data[i] - h_out[i]); //if (diff > 1 ) // cout << i << " " << j << " " << k << " " << h_data[i] << " " << h_out[i] << endl; if (max_diff < diff) max_diff = diff; if (min_diff > diff) min_diff = diff; tot_sum += diff; } cout << "tot diff: " << tot_sum << " average diff: " << tot_sum / (float)h_data.size() << " max diff: " << max_diff << " min diff: " << min_diff << endl; cout << "sum: " << thrust::reduce(h_data.begin(), h_data.end()) << " " << thrust::reduce(h_out.begin(), h_out.end()) << endl << endl; } template<class Int, class UInt, class Scalar, uint bsize> void mixedTestGPUtoCPU ( host_vector<Scalar> &h_data ) { device_vector<Scalar> data; data = h_data; dim3 emax_size(nx / 4, ny / 4, nz / 4); dim3 block_size(8, 8, 8); dim3 grid_size = emax_size; grid_size.x /= block_size.x; grid_size.y /= block_size.y; grid_size.z /= block_size.z; //const uint kmin = intprec > maxprec ? 
intprec - maxprec : 0; ErrorCheck ec; hipEvent_t start, stop; float millisecs; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); device_vector<Word > block(emax_size.x * emax_size.y * emax_size.z * bsize); cuZFP::encode<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, data, block, group_count, size); hipStreamSynchronize(0); ec.chk("cudaEncode"); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&millisecs, start, stop); ec.chk("cudaencode"); cout << "encode mixed GPU in time: " << millisecs / 1000.0 << endl; host_vector<Word> h_block = block; zfp::array3d u(nx, ny, nz, rate); unsigned char *wtf = u.compressed_data(); memcpy(wtf, raw_pointer_cast(h_block.data()), u.compressed_size()); double start_time = omp_get_wtime(); host_vector<Scalar> h_out(nx*ny*nz); for (int i = 0; i < nx*ny*nz; i++){ h_out[i] = u[i]; } double time = omp_get_wtime() - start_time; cout << "decode mixd CPU time: " << time << endl; Scalar tot_diff = 0; for (int i = 0; i < nx*ny*nz; i++){ Scalar diff = fabs(h_data[i] - h_out[i]); tot_diff += diff; } cout << "tot diff: " << tot_diff << " average diff: " << tot_diff / (float)h_out.size() << endl;// " max diff: " << max_diff << " min diff: " << min_diff << endl; cout << "sum : " << thrust::reduce(h_data.begin(), h_data.end()) << " " << thrust::reduce(h_out.begin(), h_out.end()) << endl << endl; } int main() { host_vector<double> h_vec_in(nx*ny*nz); #if 0 for (int z=0; z<nz; z++){ for (int y=0; y<ny; y++){ for (int x=0; x<nx; x++){ if (x == 0) h_vec_in[z*nx*ny + y*nx + x] = 10; else if(x == nx - 1) h_vec_in[z*nx*ny + y*nx + x] = 0; else h_vec_in[z*nx*ny + y*nx + x] = 5; } } } #else device_vector<double> d_vec_in(nx*ny*nz); thrust::counting_iterator<uint> index_sequence_begin(0); thrust::transform( index_sequence_begin, index_sequence_begin + nx*ny*nz, d_vec_in.begin(), RandGen()); h_vec_in = d_vec_in; d_vec_in.clear(); d_vec_in.shrink_to_fit(); #endif hipDeviceSetCacheConfig(hipFuncCachePreferL1); setupConst<double>(perm, MAXBITS, MAXPREC, MINEXP, EBITS, EBIAS); cout << "Begin gpuTest" << endl; gpuTest<long long, unsigned long long, double, BSIZE>(h_vec_in); cout << "Finish gpuTest" << endl; cout << "Begin zfpTest" << endl; zfpTest<double>(h_vec_in); cout << "Finish zfpTest" << endl; mixedTestGPUtoCPU<long long, unsigned long long, double, BSIZE>(h_vec_in); mixedTestCPUtoGPU<long long, unsigned long long, double, BSIZE>(h_vec_in); }
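// A minimal sketch of the hipEvent timing pattern that gpuTest and the
// mixedTest* routines above repeat around every encode/decode call, factored
// into one helper.  The helper name time_gpu_ms and the callable parameter
// are illustrative assumptions and not part of the cuZFP API; only the
// hipEvent_* calls mirror what the file actually does.
#include <hip/hip_runtime.h>

template <typename Work>
float time_gpu_ms(Work work)
{
  hipEvent_t start, stop;
  float ms = 0.0f;
  hipEventCreate(&start);
  hipEventCreate(&stop);
  hipEventRecord(start, 0);
  work();                        // e.g. the cuZFP::encode<...> or decode<...> call
  hipEventRecord(stop, 0);
  hipEventSynchronize(stop);     // block until the recorded work has finished
  hipEventElapsedTime(&ms, start, stop);
  hipEventDestroy(start);
  hipEventDestroy(stop);
  return ms;                     // milliseconds; the tests divide by 1000.0 to report seconds
}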
542c1bfa501454c938bb997a54276c5a20660ab0.cu
#include <iostream> #include <thrust/device_vector.h> #include <thrust/device_ptr.h> #include <thrust/host_vector.h> #include <thrust/random.h> #include <thrust/transform.h> #include <thrust/functional.h> #include <thrust/execution_policy.h> #include <cuda_runtime.h> #include <assert.h> #include <omp.h> #define KEPLER 0 #include "ErrorCheck.h" #include "include/encode.cuh" #include "include/decode.cuh" #include "zfparray3.h" using namespace thrust; using namespace std; #define index(x, y, z) ((x) + 4 * ((y) + 4 * (z))) const size_t nx = 512; const size_t ny = 512; const size_t nz = 512; //BSIZE is the length of the array in class Bit //It's tied to MAXBITS such that //MAXBITS = sizeof(Word) * BSIZE //which is really //MAXBITS = wsize * BSIZE //e.g. if we match bits one-to-one, double -> unsigned long long // then BSIZE = 64 and MAXPBITS = 4096 #define BSIZE 16 uint minbits = 1024; uint MAXBITS = 1024; uint MAXPREC = 64; int MINEXP = -1074; const double rate = 16; size_t blksize = 0; unsigned long long group_count = 0x46acca631ull; uint size = 64; int EBITS = 11; /* number of exponent bits */ const int EBIAS = 1023; const int intprec = 64; static const unsigned char perm[64] = { index(0, 0, 0), // 0 : 0 index(1, 0, 0), // 1 : 1 index(0, 1, 0), // 2 : 1 index(0, 0, 1), // 3 : 1 index(0, 1, 1), // 4 : 2 index(1, 0, 1), // 5 : 2 index(1, 1, 0), // 6 : 2 index(2, 0, 0), // 7 : 2 index(0, 2, 0), // 8 : 2 index(0, 0, 2), // 9 : 2 index(1, 1, 1), // 10 : 3 index(2, 1, 0), // 11 : 3 index(2, 0, 1), // 12 : 3 index(0, 2, 1), // 13 : 3 index(1, 2, 0), // 14 : 3 index(1, 0, 2), // 15 : 3 index(0, 1, 2), // 16 : 3 index(3, 0, 0), // 17 : 3 index(0, 3, 0), // 18 : 3 index(0, 0, 3), // 19 : 3 index(2, 1, 1), // 20 : 4 index(1, 2, 1), // 21 : 4 index(1, 1, 2), // 22 : 4 index(0, 2, 2), // 23 : 4 index(2, 0, 2), // 24 : 4 index(2, 2, 0), // 25 : 4 index(3, 1, 0), // 26 : 4 index(3, 0, 1), // 27 : 4 index(0, 3, 1), // 28 : 4 index(1, 3, 0), // 29 : 4 index(1, 0, 3), // 30 : 4 index(0, 1, 3), // 31 : 4 index(1, 2, 2), // 32 : 5 index(2, 1, 2), // 33 : 5 index(2, 2, 1), // 34 : 5 index(3, 1, 1), // 35 : 5 index(1, 3, 1), // 36 : 5 index(1, 1, 3), // 37 : 5 index(3, 2, 0), // 38 : 5 index(3, 0, 2), // 39 : 5 index(0, 3, 2), // 40 : 5 index(2, 3, 0), // 41 : 5 index(2, 0, 3), // 42 : 5 index(0, 2, 3), // 43 : 5 index(2, 2, 2), // 44 : 6 index(3, 2, 1), // 45 : 6 index(3, 1, 2), // 46 : 6 index(1, 3, 2), // 47 : 6 index(2, 3, 1), // 48 : 6 index(2, 1, 3), // 49 : 6 index(1, 2, 3), // 50 : 6 index(0, 3, 3), // 51 : 6 index(3, 0, 3), // 52 : 6 index(3, 3, 0), // 53 : 6 index(3, 2, 2), // 54 : 7 index(2, 3, 2), // 55 : 7 index(2, 2, 3), // 56 : 7 index(1, 3, 3), // 57 : 7 index(3, 1, 3), // 58 : 7 index(3, 3, 1), // 59 : 7 index(2, 3, 3), // 60 : 8 index(3, 2, 3), // 61 : 8 index(3, 3, 2), // 62 : 8 index(3, 3, 3), // 63 : 9 }; static size_t block_size(double rate) { return (lrint(64 * rate) + CHAR_BIT - 1) / CHAR_BIT; } template<class Scalar> void setupConst(const unsigned char *perm, uint maxbits_, uint maxprec_, int minexp_, int ebits_, int ebias_ ) { ErrorCheck ec; ec.chk("setupConst start"); cudaMemcpyToSymbol(c_perm, perm, sizeof(unsigned char) * 64, 0); ec.chk("setupConst: c_perm"); cudaMemcpyToSymbol(c_maxbits, &MAXBITS, sizeof(uint)); ec.chk("setupConst: c_maxbits"); const uint sizeof_scalar = sizeof(Scalar); cudaMemcpyToSymbol(c_sizeof_scalar, &sizeof_scalar, sizeof(uint)); ec.chk("setupConst: c_sizeof_scalar"); cudaMemcpyToSymbol(c_maxprec, &maxprec_, sizeof(uint)); ec.chk("setupConst: c_maxprec"); 
cudaMemcpyToSymbol(c_minexp, &minexp_, sizeof(int)); ec.chk("setupConst: c_minexp"); cudaMemcpyToSymbol(c_ebits, &ebits_, sizeof(int)); ec.chk("setupConst: c_ebits"); cudaMemcpyToSymbol(c_ebias, &ebias_, sizeof(int)); ec.chk("setupConst: c_ebias"); ec.chk("setupConst finished"); } //Used to generate rand array in CUDA with Thrust struct RandGen { RandGen() {} __device__ float operator () (const uint idx) { thrust::default_random_engine randEng; thrust::uniform_real_distribution<float> uniDist(0.0, 0.0001); randEng.discard(idx); return uniDist(randEng); } }; // forward decorrelating transform template<class Int> __device__ __host__ static void fwd_xform_zy(Int* p) { for (uint z = 0; z < 4; z++) for (uint y = 4; y-- > 0;) cuZFP::fwd_lift<Int, 1>(p + 4 * y + 16 * z); } // forward decorrelating transform template<class Int> __device__ __host__ static void fwd_xform_xz(Int* p) { for (uint x = 4; x-- > 0;) for (uint z = 4; z-- > 0;) cuZFP::fwd_lift<Int, 4>(p + 16 * z + 1 * x); } // forward decorrelating transform template<class Int> __host__ static void fwd_xform_yx(Int* p) { for (uint y = 4; y-- > 0;) for (uint x = 4; x-- > 0;) cuZFP::fwd_lift<Int, 16>(p + 1 * x + 4 * y); } // forward decorrelating transform template<class Int> __host__ static void fwd_xform(Int* p) { fwd_xform_zy(p); fwd_xform_xz(p); fwd_xform_yx(p); } template<class Int, class UInt, class Scalar, uint bsize> void cpuEncode ( dim3 gridDim, dim3 blockDim, const unsigned long long count, uint size, const Scalar* data, const unsigned char *g_cnt, Word *block ) { dim3 blockIdx; for (blockIdx.z = 0; blockIdx.z < gridDim.z; blockIdx.z++){ for (blockIdx.y = 0; blockIdx.y < gridDim.y; blockIdx.y++){ for (blockIdx.x = 0; blockIdx.x <gridDim.x; blockIdx.x++){ Int sh_q[64]; UInt sh_p[64]; uint sh_m[64], sh_n[64]; Bitter sh_bitters[64]; unsigned char sh_sbits[64]; uint mx = blockIdx.x, my = blockIdx.y, mz = blockIdx.z; mx *= 4; my *= 4; mz *= 4; int emax = cuZFP::max_exp_block(data, mx, my, mz, 1, blockDim.x * gridDim.x, gridDim.x * gridDim.y * blockDim.x * blockDim.y); // uint sz = gridDim.x*blockDim.x * 4 * gridDim.y*blockDim.y * 4; // uint sy = gridDim.x*blockDim.x * 4; // uint sx = 1; cuZFP::fixed_point_block<Int, Scalar, intprec>(sh_q, data, emax, mx, my, mz, 1, blockDim.x * gridDim.x, gridDim.x * gridDim.y * blockDim.x * blockDim.y); fwd_xform(sh_q); //fwd_order for (int i = 0; i < 64; i++){ sh_p[i] = cuZFP::int2uint<Int, UInt>(sh_q[perm[i]]); } uint bidx = (blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.y * gridDim.x); //cuZFP::Bit<bsize> stream(block + bidx*bsize); unsigned long long x[64], y[64]; Bitter bitter[64]; for (int i = 0; i < 64; i++){ bitter[i] = make_bitter(0, 0); } uint s_emax_bits[1]; s_emax_bits[0] = 1; //maxprec, minexp, EBITS // uint k = threadIdx.x + blockDim.x * blockIdx.x; int maxprec = cuZFP::precision(emax, MAXPREC, MINEXP); int ebits = EBITS + 1; const uint kmin = intprec > maxprec ? intprec - maxprec : 0; uint e = maxprec ? emax + EBIAS : 0; //printf("%d %d %d %d\n", emax, maxprec, ebits, e); if (e){ //write_bitters(bitter[0], make_bitter(2 * e + 1, 0), ebits, sbit[0]); block[bidx*bsize] = 2 * e + 1; //stream[bidx].write_bits(2 * e + 1, ebits); s_emax_bits[0] = ebits; } // const uint kmin = intprec > MAXPREC ? 
intprec - MAXPREC : 0; //unsigned long long x[64]; #pragma omp parallel for for (int tid = 0; tid<64; tid++){ /* step 1: extract bit plane #k to x */ x[tid] = 0; for (int i = 0; i < size; i++) x[tid] += (uint64)((sh_p[i] >> tid) & 1u) << i; y[tid] = x[tid]; } #pragma omp parallel for for (int tid = 0; tid < 64; tid++){ sh_m[tid] = 0; sh_n[tid] = 0; sh_sbits[tid] = 0; } #pragma omp parallel for for (int tid = 0; tid < 64; tid++){ //get the index of the first 'one' in the bit plane for (int i = 0; i < 64; i++){ if (!!(x[tid] >> i)) sh_n[tid] = i + 1; } } for (int i = 0; i < 63; i++){ sh_m[i] = sh_n[i + 1]; } //make sure that m increases isotropically for (int i = intprec - 1; i-- > 0;){ if (sh_m[i] < sh_m[i + 1]) sh_m[i] = sh_m[i + 1]; } //compute the number of bits used per thread int bits[64]; #pragma omp parallel for for (int tid = 0; tid < 64; tid++) { bits[tid] = 128; int n = 0; /* step 2: encode first n bits of bit plane */ bits[tid] -= sh_m[tid]; x[tid] >>= sh_m[tid]; x[tid] = (sh_m[tid] != 64) * x[tid]; n = sh_m[tid]; /* step 3: unary run-length encode remainder of bit plane */ for (; n < size && bits[tid] && (bits[tid]--, !!x[tid]); x[tid] >>= 1, n++) for (; n < size - 1 && bits[tid] && (bits[tid]--, !(x[tid] & 1u)); x[tid] >>= 1, n++) ; } //number of bits read per thread //#pragma omp parallel for for (int tid = 0; tid < 64; tid++){ bits[tid] = (128 - bits[tid]); } #pragma omp parallel for for (int tid = 0; tid < 64; tid++){ sh_n[tid] = min(sh_m[tid], bits[tid]); } #pragma omp parallel for for (int tid = 0; tid < 64; tid++) { /* step 2: encode first n bits of bit plane */ unsigned char sbits = 0; //y[tid] = stream[bidx].write_bits(y[tid], sh_m[tid]); y[tid] = write_bitters(bitter[tid], make_bitter(y[tid], 0), sh_m[tid], sbits); uint n = sh_n[tid]; /* step 3: unary run-length encode remainder of bit plane */ for (; n < size && bits[tid] && (bits[tid]-- && write_bitter(bitter[tid], !!y[tid], sbits)); y[tid] >>= 1, n++) for (; n < size - 1 && bits[tid] && (bits[tid]-- && !write_bitter(bitter[tid], y[tid] & 1u, sbits)); y[tid] >>= 1, n++) ; sh_bitters[63 - tid] = bitter[tid]; sh_sbits[63 - tid] = sbits; } uint rem_sbits = s_emax_bits[0]; uint tot_sbits = s_emax_bits[0]; uint offset = 0; for (int i = 0; i < intprec && tot_sbits < MAXBITS; i++){ if (sh_sbits[i] <= 64){ write_outx<bsize>(sh_bitters, block + bidx*bsize, rem_sbits, tot_sbits, offset, i, sh_sbits[i]); } else{ write_outx<bsize>(sh_bitters, block + bidx*bsize, rem_sbits, tot_sbits, offset, i, 64); if (tot_sbits < MAXBITS) write_outy<bsize>(sh_bitters, block + bidx*bsize, rem_sbits, tot_sbits, offset, i, sh_sbits[i] - 64); } } } } } } template<class Int, class UInt, class Scalar, uint bsize, uint num_sidx> void cpuDecode ( dim3 gridDim, dim3 blockDim, size_t *sidx, Word *block, Scalar *out, const unsigned long long orig_count ) { dim3 blockIdx; for (blockIdx.z = 0; blockIdx.z < gridDim.z; blockIdx.z++){ for (blockIdx.y = 0; blockIdx.y < gridDim.y; blockIdx.y++){ for (blockIdx.x = 0; blockIdx.x < gridDim.x; blockIdx.x++){ uint idx = (blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.y * gridDim.x); uint bdim = blockDim.x*blockDim.y*blockDim.z; uint bidx = idx*bdim; cuZFP::Bit<bsize> stream; size_t s_sidx[64];// = (size_t*)&smem[0]; //if (tid < num_sidx) for (int tid = 0; tid < num_sidx; tid++){ s_sidx[tid] = sidx[tid]; } uint s_idx_n[64];// = (uint*)&smem[s_sidx[0]]; uint s_idx_g[64];// = (uint*)&smem[s_sidx[1]]; unsigned long long s_bit_cnt[64];// = (unsigned long long*)&smem[s_sidx[2]]; uint s_bit_rmn_bits[64];// = 
(uint*)&smem[s_sidx[3]]; char s_bit_offset[64];// = (char*)&smem[s_sidx[4]]; uint s_bit_bits[64];// = (uint*)&smem[s_sidx[5]]; Word s_bit_buffer[64];// = (Word*)&smem[s_sidx[6]]; UInt s_data[64];// = (UInt*)&smem[s_sidx[7]]; Int s_q[64]; uint s_kmin[1]; int s_emax[1]; //stream[idx].rewind(); stream.read_bit(); uint ebits = EBITS + 1; s_emax[0] = stream.read_bits(ebits - 1) - EBIAS; int maxprec = cuZFP::precision(s_emax[0], MAXPREC, MINEXP); s_kmin[0] = intprec > maxprec ? intprec - maxprec : 0; for (int tid = 0; tid < size; tid++) s_data[tid] = 0; uint bits = MAXBITS - ebits; unsigned long long x[64]; int *sh_idx = new int[bsize*64]; int *sh_tmp_idx = new int[bsize * 64]; for (int tid = 0; tid < 64; tid++){ for (int i = 0; i < 16; i++){ sh_idx[i * 64 + tid] = -1; sh_tmp_idx[i * 64 + tid] = -1; } } int sh_cnt[bsize]; int beg_idx[bsize]; for (int tid = 0; tid < 64; tid++){ if (tid < bsize){ beg_idx[tid] = 0; if (tid == 0) beg_idx[tid] = ebits; sh_cnt[tid] = 0; for (int i = beg_idx[tid]; i < 64; i++){ if ((stream.begin[tid] >> i) & 1u){ sh_tmp_idx[tid * 64 + sh_cnt[tid]++] = tid*64 + i; } } } } //fix blocks since they are off by ebits for (int i = 0; i < bsize; i++){ for (int tid = 0; tid < 64; tid++){ if (tid < sh_cnt[i]){ sh_tmp_idx[i*64 + tid] -= ebits; } } } for (int tid = 0; tid < 64; tid++){ if (tid < sh_cnt[0]) sh_idx[tid] = sh_tmp_idx[tid]; } for (int i = 1; i < bsize; i++){ for (int tid = 0; tid < 64; tid++){ if (tid == 0) sh_cnt[i] += sh_cnt[i - 1]; if (tid < sh_cnt[i]){ sh_idx[sh_cnt[i - 1] + tid] = sh_tmp_idx[i * 64 + tid]; } } } /* decode one bit plane at a time from MSB to LSB */ int cnt = 0; //uint new_n = 0; uint bits_cnt = ebits; for (uint tid = intprec, n = 0; bits && tid-- > s_kmin[0];) { /* decode first n bits of bit plane #k */ uint m = MIN(n, bits); bits -= m; bits_cnt += m; x[tid] = stream.read_bits(m); /* unary run-length decode remainder of bit plane */ for (; n < size && bits && (bits--, bits_cnt++, stream.read_bit()); x[tid] += (uint64)1 << n++){ int num_bits = 0; uint chk = 0; //uint tmp_bits = stream[idx].bits; //Word tmp_buffer = stream[idx].buffer; //char tmp_offset = stream[idx].offset; //for (; n < size - 1 && bits && (bits--, !stream[idx].read_bit()); n++) // ; //stream[idx].bits = tmp_bits; //stream[idx].buffer = tmp_buffer; //stream[idx].offset = tmp_offset; while (n < size - 1 && bits && (bits--, bits_cnt++, !stream.read_bit())){ //the number of bits read in one go: //this can be affected by running out of bits in the block (variable bits) // and how much is encoded per number (variable n) // and how many zeros there are since the last one bit. 
// Finally, the last bit isn't read because we'll check it to see // where we are /* fast forward to the next one bit that hasn't been read yet*/ while (sh_idx[cnt] < bits_cnt - ebits){ cnt++; } cnt--; //compute the raw number of bits between the last one bit and the current one bit num_bits = sh_idx[cnt + 1] - sh_idx[cnt]; //the one bit as two positions previous num_bits -= 2; num_bits = min(num_bits, (size - 1) - n - 1); bits_cnt += num_bits; if (num_bits > 0){ stream.read_bits(num_bits); bits -= num_bits; n += num_bits; } n++; } //if (n != new_n || new_bits != bits){ // cout << n << " " << new_n << " " << bits << " " << new_bits << " " << blockIdx.x * gridDim.x << " " << blockIdx.y*gridDim.y << " " << blockIdx.z * gridDim.z << endl; // exit(0); //} } /* deposit bit plane from x */ for (int i = 0; x[tid]; i++, x[tid] >>= 1) s_data[i] += (UInt)(x[tid] & 1u) << tid; } for (int tid = 0; tid < 64; tid++){ s_q[perm[tid]] = cuZFP::uint2int<Int, UInt>(s_data[tid]); } uint mx = blockIdx.x, my = blockIdx.y, mz = blockIdx.z; mx *= 4; my *= 4; mz *= 4; cuZFP::inv_xform(s_q); cuZFP::inv_cast<Int, Scalar>(s_q, out, s_emax[0], mx, my, mz, 1, gridDim.x*blockDim.x, gridDim.x*blockDim.x * gridDim.y*blockDim.y); } } } } template<class Int, class UInt, class Scalar, uint bsize> void gpuTest ( host_vector<Scalar> &h_data ) { host_vector<int> h_emax; host_vector<UInt> h_p; host_vector<Int> h_q; host_vector<UInt> h_buf(nx*ny*nz); host_vector<cuZFP::Bit<bsize> > h_bits; device_vector<unsigned char> d_g_cnt; device_vector<Scalar> data; data = h_data; dim3 emax_size(nx / 4, ny / 4, nz / 4); dim3 block_size(8, 8, 8); dim3 grid_size = emax_size; grid_size.x /= block_size.x; grid_size.y /= block_size.y; grid_size.z /= block_size.z; //const uint kmin = intprec > maxprec ? intprec - maxprec : 0; ErrorCheck ec; cudaEvent_t start, stop; float millisecs; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); device_vector<Word > block(emax_size.x * emax_size.y * emax_size.z * bsize); cuZFP::encode<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, data, block, group_count, size); cudaStreamSynchronize(0); ec.chk("cudaEncode"); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&millisecs, start, stop); ec.chk("cudaencode"); cout << "encode GPU in time: " << millisecs/1000.0 << endl; thrust::host_vector<Word > cpu_block; cpu_block = block; UInt sum = 0; for (int i = 0; i < cpu_block.size(); i++){ sum += cpu_block[i]; } cout << "encode UInt sum: " << sum << endl; cudaMemset(thrust::raw_pointer_cast(data.data()), 0, sizeof(Scalar)*data.size()); cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, block, data, group_count); ec.chk("cudaDecode"); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&millisecs, start, stop); cout << "decode parallel GPU in time: " << millisecs/1000.0 << endl; double tot_sum = 0, max_diff = 0, min_diff = 1e16; host_vector<Scalar> h_out = data; for (int i = 0; i < h_data.size(); i++){ int k = 0, j = 0; frexp(h_data[i], &j); frexp(h_out[i], &k); //if (abs(j - k) > 1){ // cout << i << " " << j << " " << k << " " << h_data[i] << " " << h_out[i] << endl; // //exit(-1); //} double diff = fabs(h_data[i] - h_out[i]); //if (diff > 1 ) // cout << i << " " << j << " " << k << " " << h_data[i] << " " << h_out[i] << endl; if (max_diff < diff) max_diff = diff; if (min_diff > diff) min_diff = diff; tot_sum += diff; } cout << "tot diff: " << tot_sum << " average 
diff: " << tot_sum / (float)h_data.size() << " max diff: " << max_diff << " min diff: " << min_diff << endl; cout << "sum: " << thrust::reduce(h_data.begin(), h_data.end()) << " " << thrust::reduce(h_out.begin(), h_out.end()) << endl << endl; //gpuValidate<Int, UInt, Scalar, bsize>(h_data, q, data); } template<class Int, class UInt, class Scalar, uint bsize> void cpuTestBitStream ( host_vector<Scalar> &h_data ) { host_vector<int> h_emax; host_vector<UInt> h_p; host_vector<Int> h_q; host_vector<UInt> h_buf(nx*ny*nz); host_vector<Word > h_bits; dim3 emax_size(nx / 4, ny / 4, nz / 4); dim3 block_size(8, 8, 8); dim3 grid_size = emax_size; grid_size.x /= block_size.x; grid_size.y /= block_size.y; grid_size.z /= block_size.z; //const uint kmin = intprec > maxprec ? intprec - maxprec : 0; host_vector<Word > cpu_block(emax_size.x * emax_size.y * emax_size.z * bsize); block_size = dim3(4, 4, 4); grid_size = dim3(nx, ny, nz); grid_size.x /= block_size.x; grid_size.y /= block_size.y; grid_size.z /= block_size.z; unsigned long long count = group_count; host_vector<unsigned char> g_cnt(10); uint sum = 0; g_cnt[0] = 0; for (int i = 1; i < 10; i++){ sum += count & 0xf; g_cnt[i] = sum; count >>= 4; } cpuEncode<Int, UInt, Scalar, bsize>( grid_size, block_size, group_count, size, thrust::raw_pointer_cast(h_data.data()), thrust::raw_pointer_cast(g_cnt.data()), thrust::raw_pointer_cast(cpu_block.data())); unsigned long long block_sum = 0; for (int i = 0; i < cpu_block.size(); i++){ block_sum += cpu_block[i]; } cout << "encode UInt sum: " << block_sum << endl; host_vector<Scalar> h_out(nx*ny* nz); block_size = dim3(4, 4, 4); grid_size = dim3(nx, ny, nz); grid_size.x /= block_size.x; grid_size.y /= block_size.y; grid_size.z /= block_size.z; size_t blcksize = block_size.x *block_size.y * block_size.z; size_t s_idx[12] = { sizeof(size_t) * 12, blcksize * sizeof(uint), blcksize * sizeof(uint), +blcksize * sizeof(unsigned long long), blcksize * sizeof(uint), blcksize * sizeof(char), blcksize * sizeof(uint), blcksize * sizeof(Word), blcksize * sizeof(UInt), blcksize * sizeof(Int), sizeof(uint), sizeof(int) }; thrust::inclusive_scan(s_idx, s_idx + 11, s_idx); const size_t shmem_size = thrust::reduce(s_idx, s_idx + 11); cpuDecode < Int, UInt, Scalar, bsize, 9 > (grid_size, block_size, s_idx, raw_pointer_cast(cpu_block.data()), raw_pointer_cast(h_out.data()), group_count); double tot_sum = 0, max_diff = 0, min_diff = 1e16; for (int i = 0; i < h_data.size(); i++){ int k = 0, j = 0; frexp(h_data[i], &j); frexp(h_out[i], &k); //if (abs(j - k) > 1){ // cout << i << " " << j << " " << k << " " << h_data[i] << " " << h_out[i] << endl; // //exit(-1); //} double diff = fabs(h_data[i] - h_out[i]); //if (diff > 1) // cout << i << " " << j << " " << k << " " << h_data[i] << " " << h_out[i] << endl; if (max_diff < diff) max_diff = diff; if (min_diff > diff) min_diff = diff; tot_sum += diff; } cout << "tot diff: " << tot_sum << " average diff: " << tot_sum / (float)h_data.size() << " max diff: " << max_diff << " min diff: " << min_diff << endl; cout << "sum: " << thrust::reduce(h_data.begin(), h_data.end()) << " " << thrust::reduce(h_out.begin(), h_out.end()) << endl; //gpuValidate<Int, UInt, Scalar, bsize>(h_data, q, data); } template<typename Scalar> void zfpTest ( host_vector<Scalar> &h_data ) { double start_time = omp_get_wtime(); #if 0 //doing the compression with sextuple nested for loop is actually //faster than the array3d way. 
zfp_field* field = zfp_field_alloc(); zfp_field_set_type(field, zfp::codec<Scalar>::type); zfp_field_set_pointer(field, thrust::raw_pointer_cast(h_data.data())); zfp_field_set_size_3d(field, nx, ny, nz); zfp_stream* stream = zfp_stream_open(0); uint n = zfp_field_size(field, NULL); uint dims = zfp_field_dimensionality(field); zfp_type type = zfp_field_type(field); Scalar new_rate = zfp_stream_set_rate(stream, rate, type, dims, 0); size_t bufsize = zfp_stream_maximum_size(stream, field); uchar* buffer = new uchar[bufsize]; bitstream* s = stream_open(buffer, bufsize); zfp_stream_set_bit_stream(stream, s); zfp_stream_rewind(stream); int m = 0; for (int z = 0; z < nz; z += 4){ for (int y = 0; y < ny; y += 4){ for (int x = 0; x < nx; x += 4){ Scalar b[64]; m = 0; for (int i = 0; i < 4; i++){ for (int j = 0; j < 4; j++){ for (int k = 0; k < 4; k++, m++){ b[m] = h_data[(z + i)*nx*ny + (y + j)*nx + x + k]; } } } zfp_encode_block_double_3(stream, b); } } } Scalar time = omp_get_wtime() - start_time; cout << "encode time: " << time << endl; //cout << "sum UInt " << thrust::reduce(stream->begin, stream->end) << endl; stream_flush(s); host_vector<Scalar> h_out(nx*ny*nz); stream_rewind(s); start_time = omp_get_wtime(); for (int z = 0; z < nz; z += 4){ for (int y = 0; y < ny; y += 4){ for (int x = 0; x < nx; x += 4){ m = 0; Scalar b[64]; zfp_decode_block_double_3(stream, b); for (int i = 0; i < 4; i++){ for (int j = 0; j < 4; j++){ for (int k = 0; k < 4; k++, m++){ h_out[(z + i)*nx*ny + (y + j)*nx + x + k] = b[m]; } } } } } } #else zfp::array3d u(nx, ny, nz, rate); //this isn't any faster than straight 0 to n-1 //for (int z = 0; z < nz; z++){ // for (int y = 0; y < nz; y++){ // for (int x = 0; x < nx; x++){ // u[x + y * nx + z * nx * ny] = h_data[x + y * nx + z * nx * ny]; // } // } //} for (int i = 0; i < nx*ny*nz; i++){ u[i] = h_data[i]; } double time = omp_get_wtime() - start_time; cout << "encode time: " << time << endl; start_time = omp_get_wtime(); host_vector<Scalar> h_out(nx*ny*nz); for (int i = 0; i < nx*ny*nz; i++){ h_out[i] = u[i]; } time = omp_get_wtime() - start_time; cout << "decode time: " << time << endl; #endif Scalar tot_diff = 0; for (int i = 0; i < nx*ny*nz; i++){ Scalar diff = fabs(h_data[i] - h_out[i]); tot_diff += diff; } cout << "tot diff: " << tot_diff << " average diff: " << tot_diff / (float)h_out.size() << endl;// " max diff: " << max_diff << " min diff: " << min_diff << endl; cout << "sum : " << thrust::reduce(h_data.begin(), h_data.end()) << " " << thrust::reduce(h_out.begin(), h_out.end()) << endl; } template<class Int, class UInt, class Scalar, uint bsize> void mixedTestCPUtoGPU ( host_vector<Scalar> &h_data ) { dim3 emax_size(nx / 4, ny / 4, nz / 4); dim3 block_size(8, 8, 8); dim3 grid_size = emax_size; grid_size.x /= block_size.x; grid_size.y /= block_size.y; grid_size.z /= block_size.z; //const uint kmin = intprec > maxprec ? 
intprec - maxprec : 0; ErrorCheck ec; cudaEvent_t start, stop; float millisecs; double start_time = omp_get_wtime(); zfp::array3d u(nx, ny, nz, rate); for (int i = 0; i < nx*ny*nz; i++){ u[i] = h_data[i]; } double time = omp_get_wtime() - start_time; u.flush_cache(); cout << "encode time: " << time << endl; unsigned char * cd = u.compressed_data(); cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); device_vector<Word > block(emax_size.x * emax_size.y * emax_size.z * bsize); host_vector<Word> h_block(emax_size.x * emax_size.y * emax_size.z * bsize); memcpy(h_block.data(), cd, u.compressed_size()); block = h_block; device_vector<Scalar> data(nx*ny*nz, 0.0); cuZFP::decode<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, block, data, group_count); ec.chk("cudaDecode"); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&millisecs, start, stop); cout << "decode parallel GPU in time: " << millisecs / 1000.0 << endl; double tot_sum = 0, max_diff = 0, min_diff = 1e16; host_vector<Scalar> h_out = data; for (int i = 0; i < h_data.size(); i++){ int k = 0, j = 0; frexp(h_data[i], &j); frexp(h_out[i], &k); //if (abs(j - k) > 1){ // cout << i << " " << j << " " << k << " " << h_data[i] << " " << h_out[i] << endl; // //exit(-1); //} double diff = fabs(h_data[i] - h_out[i]); //if (diff > 1 ) // cout << i << " " << j << " " << k << " " << h_data[i] << " " << h_out[i] << endl; if (max_diff < diff) max_diff = diff; if (min_diff > diff) min_diff = diff; tot_sum += diff; } cout << "tot diff: " << tot_sum << " average diff: " << tot_sum / (float)h_data.size() << " max diff: " << max_diff << " min diff: " << min_diff << endl; cout << "sum: " << thrust::reduce(h_data.begin(), h_data.end()) << " " << thrust::reduce(h_out.begin(), h_out.end()) << endl << endl; } template<class Int, class UInt, class Scalar, uint bsize> void mixedTestGPUtoCPU ( host_vector<Scalar> &h_data ) { device_vector<Scalar> data; data = h_data; dim3 emax_size(nx / 4, ny / 4, nz / 4); dim3 block_size(8, 8, 8); dim3 grid_size = emax_size; grid_size.x /= block_size.x; grid_size.y /= block_size.y; grid_size.z /= block_size.z; //const uint kmin = intprec > maxprec ? 
intprec - maxprec : 0; ErrorCheck ec; cudaEvent_t start, stop; float millisecs; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); device_vector<Word > block(emax_size.x * emax_size.y * emax_size.z * bsize); cuZFP::encode<Int, UInt, Scalar, bsize, intprec>(nx, ny, nz, data, block, group_count, size); cudaStreamSynchronize(0); ec.chk("cudaEncode"); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&millisecs, start, stop); ec.chk("cudaencode"); cout << "encode mixed GPU in time: " << millisecs / 1000.0 << endl; host_vector<Word> h_block = block; zfp::array3d u(nx, ny, nz, rate); unsigned char *wtf = u.compressed_data(); memcpy(wtf, raw_pointer_cast(h_block.data()), u.compressed_size()); double start_time = omp_get_wtime(); host_vector<Scalar> h_out(nx*ny*nz); for (int i = 0; i < nx*ny*nz; i++){ h_out[i] = u[i]; } double time = omp_get_wtime() - start_time; cout << "decode mixd CPU time: " << time << endl; Scalar tot_diff = 0; for (int i = 0; i < nx*ny*nz; i++){ Scalar diff = fabs(h_data[i] - h_out[i]); tot_diff += diff; } cout << "tot diff: " << tot_diff << " average diff: " << tot_diff / (float)h_out.size() << endl;// " max diff: " << max_diff << " min diff: " << min_diff << endl; cout << "sum : " << thrust::reduce(h_data.begin(), h_data.end()) << " " << thrust::reduce(h_out.begin(), h_out.end()) << endl << endl; } int main() { host_vector<double> h_vec_in(nx*ny*nz); #if 0 for (int z=0; z<nz; z++){ for (int y=0; y<ny; y++){ for (int x=0; x<nx; x++){ if (x == 0) h_vec_in[z*nx*ny + y*nx + x] = 10; else if(x == nx - 1) h_vec_in[z*nx*ny + y*nx + x] = 0; else h_vec_in[z*nx*ny + y*nx + x] = 5; } } } #else device_vector<double> d_vec_in(nx*ny*nz); thrust::counting_iterator<uint> index_sequence_begin(0); thrust::transform( index_sequence_begin, index_sequence_begin + nx*ny*nz, d_vec_in.begin(), RandGen()); h_vec_in = d_vec_in; d_vec_in.clear(); d_vec_in.shrink_to_fit(); #endif cudaDeviceSetCacheConfig(cudaFuncCachePreferL1); setupConst<double>(perm, MAXBITS, MAXPREC, MINEXP, EBITS, EBIAS); cout << "Begin gpuTest" << endl; gpuTest<long long, unsigned long long, double, BSIZE>(h_vec_in); cout << "Finish gpuTest" << endl; cout << "Begin zfpTest" << endl; zfpTest<double>(h_vec_in); cout << "Finish zfpTest" << endl; mixedTestGPUtoCPU<long long, unsigned long long, double, BSIZE>(h_vec_in); mixedTestCPUtoGPU<long long, unsigned long long, double, BSIZE>(h_vec_in); }
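// A small, self-contained illustration of how cpuTestBitStream above unpacks
// group_count = 0x46acca631ull into the cumulative per-group offsets g_cnt.
// Each 4-bit nibble, read from the least-significant end, holds one group
// size (1, 3, 6, 10, 12, 12, 10, 6, 4); the running sum gives the offsets
// 0, 1, 4, 10, 20, 32, 44, 54, 60 and a total of 64, matching size = 64
// (one 4x4x4 block).  The function name is only for illustration and is not
// called by the tests.
#include <cstdio>

void print_group_count_offsets()
{
  unsigned long long count = 0x46acca631ull;
  unsigned char g_cnt[10];
  unsigned sum = 0;
  g_cnt[0] = 0;
  for (int i = 1; i < 10; i++) {
    sum += count & 0xf;   // next group size from the low nibble
    g_cnt[i] = sum;       // cumulative offset of group i
    count >>= 4;
  }
  for (int i = 0; i < 10; i++)
    printf("%u ", (unsigned)g_cnt[i]);   // prints: 0 1 4 10 20 32 44 54 60 64
  printf("\n");
}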
dad9ae1f00fec10eddc08fd5bc3b05f1162ab9ef.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vectorAdd.hip"
#include <chrono>
#include <iostream>

using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int *a = NULL;
            hipMalloc(&a, XSIZE*YSIZE);
            int *b = NULL;
            hipMalloc(&b, XSIZE*YSIZE);
            int *c = NULL;
            hipMalloc(&c, XSIZE*YSIZE);
            int n = XSIZE*YSIZE;
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(vectorAdd, dim3(gridBlock), dim3(threadBlock), 0, 0, a, b, c, n);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(vectorAdd, dim3(gridBlock), dim3(threadBlock), 0, 0, a, b, c, n);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(vectorAdd, dim3(gridBlock), dim3(threadBlock), 0, 0, a, b, c, n);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
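// The two while-loops above round iXSIZE/iYSIZE up to the next multiple of
// the block dimensions before dividing.  A loop-free equivalent, shown only
// as a sketch (the helper name make_grid is an illustrative assumption), is
// the usual ceiling division:
#include <hip/hip_runtime.h>

static inline dim3 make_grid(int xsize, int ysize, int blockx, int blocky)
{
  // (n + b - 1) / b == ceil(n / b) for positive integers n and b
  return dim3((xsize + blockx - 1) / blockx,
              (ysize + blocky - 1) / blocky);
}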
dad9ae1f00fec10eddc08fd5bc3b05f1162ab9ef.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vectorAdd.cu"
#include <chrono>
#include <iostream>

using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int *a = NULL;
            cudaMalloc(&a, XSIZE*YSIZE);
            int *b = NULL;
            cudaMalloc(&b, XSIZE*YSIZE);
            int *c = NULL;
            cudaMalloc(&c, XSIZE*YSIZE);
            int n = XSIZE*YSIZE;
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            vectorAdd<<<gridBlock, threadBlock>>>(a, b, c, n);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                vectorAdd<<<gridBlock, threadBlock>>>(a, b, c, n);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                vectorAdd<<<gridBlock, threadBlock>>>(a, b, c, n);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
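// The kernel being timed comes from the included "vectorAdd.cu", which is not
// reproduced in this record.  The sketch below is only a guess at a minimal
// kernel matching the launch vectorAdd<<<gridBlock, threadBlock>>>(a, b, c, n)
// used above; the body and the _sketch suffix are assumptions, not the
// original implementation.  It flattens the 2D grid/block configuration into
// one global element index.
__global__ void vectorAdd_sketch(int *a, int *b, int *c, int n)
{
  int block = blockIdx.y * gridDim.x + blockIdx.x;        // linear block id
  int tid   = threadIdx.y * blockDim.x + threadIdx.x;     // linear thread id within the block
  int i     = block * (blockDim.x * blockDim.y) + tid;    // global element index
  if (i < n)
    c[i] = a[i] + b[i];
}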
9282ad9ed9d5c5d837d57573f3357ce7df3fbc08.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>

#include "caffe/layer.hpp"
#include "caffe/layers/signed_sqrt_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
__global__ void caffe_gpu_signed_sqrt(const int nthreads, const Dtype* src, Dtype* dst) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    if (src[index] >= 0)
      dst[index] = sqrt(src[index]);
    else
      dst[index] = -sqrt(-src[index]);
  }
}

template <typename Dtype>
void SignedSqrtLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const int count = bottom[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(caffe_gpu_signed_sqrt<Dtype>,
      dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      count, bottom_data, top_data);
}

template <typename Dtype>
void SignedSqrtLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    const int count = bottom[0]->count();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    const Dtype* top_diff = top[0]->gpu_diff();
    const Dtype* top_data = top[0]->gpu_data();
    caffe_gpu_abs(count, top_data, bottom_diff);
    caffe_gpu_add_scalar(count, epsilon, bottom_diff);
    caffe_gpu_div(count, top_diff, bottom_diff, bottom_diff);
    caffe_gpu_scal(count, Dtype(0.5), bottom_diff);
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(SignedSqrtLayer);

}  // namespace caffe
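// Clarifying note on the backward pass above: the forward kernel computes
// y = sign(x) * sqrt(|x|), so for x != 0 the derivative is
//   dy/dx = 1 / (2 * sqrt(|x|)) = 1 / (2 * |y|).
// The caffe_gpu_abs -> caffe_gpu_add_scalar -> caffe_gpu_div ->
// caffe_gpu_scal sequence therefore computes
//   bottom_diff = top_diff * 0.5 / (|y| + epsilon),
// where epsilon guards against division by zero at y == 0.  epsilon itself
// is assumed to be declared in signed_sqrt_layer.hpp, which is not shown in
// this record.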
9282ad9ed9d5c5d837d57573f3357ce7df3fbc08.cu
#include <algorithm>
#include <vector>

#include "caffe/layer.hpp"
#include "caffe/layers/signed_sqrt_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
__global__ void caffe_gpu_signed_sqrt(const int nthreads, const Dtype* src, Dtype* dst) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    if (src[index] >= 0)
      dst[index] = sqrt(src[index]);
    else
      dst[index] = -sqrt(-src[index]);
  }
}

template <typename Dtype>
void SignedSqrtLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const int count = bottom[0]->count();
  caffe_gpu_signed_sqrt<Dtype>
      // NOLINT_NEXT_LINE(whitespace/operators)
      <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
      count, bottom_data, top_data);
}

template <typename Dtype>
void SignedSqrtLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    const int count = bottom[0]->count();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    const Dtype* top_diff = top[0]->gpu_diff();
    const Dtype* top_data = top[0]->gpu_data();
    caffe_gpu_abs(count, top_data, bottom_diff);
    caffe_gpu_add_scalar(count, epsilon, bottom_diff);
    caffe_gpu_div(count, top_diff, bottom_diff, bottom_diff);
    caffe_gpu_scal(count, Dtype(0.5), bottom_diff);
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(SignedSqrtLayer);

}  // namespace caffe
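// Clarifying note (these macros come from Caffe's CUDA utility headers and
// are not defined in this file): CUDA_KERNEL_LOOP(index, n) is Caffe's
// grid-stride loop, roughly
//   for (int index = blockIdx.x * blockDim.x + threadIdx.x;
//        index < (n);
//        index += blockDim.x * gridDim.x)
// and CAFFE_GET_BLOCKS(n) is a ceiling division of n by the fixed
// CAFFE_CUDA_NUM_THREADS, so every index in [0, n) is processed by the
// launches above.  Treat the exact expansions as an assumption; only the
// macro names appear in this record.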
610074226d17c247ca4e59f60bd95047fbad3852.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*********************************\ * Cuda Accelerated Matrix Library * * * * by * * Elliott Forney * * 3.9.2010 * \*********************************/ /* need to fix all kernels to handle matrices with diff strides or else ensure that all matrices will have same stride in r0,v1,c0v,cv */ /* * Libraries */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <omp.h> #include "errcheck.h" #include "matrix.h" /* * Macros */ //** Debugging level #define DEBUG 0 //** Initialization // max number of chars per line in input table #define line_buff_size 1024 //** Access // row major indexing // expects stride and data to be set #define data_at(r,c) data[r*stride+c] //** Addition/Subtraction // when rows/cols < rlim/clim use small kernel // must be greater than or equal to add_big_tpb #define add_rlim 512 #define add_clim 512 // threads per block for small addition kernel #define add_small_tpb 64 // threads per block for big addition kernel // this must be smaller than and divide // lcm(mult_tn,2*mult_tp) evenly #define add_big_tpb 128 // stripe size for big addition kernel #define add_big_stripe 4 //** Multiplication // tile sizes #define mult_tm 20 #define mult_tn 16 #define mult_tp 64 /* small mult tile sizes, uncomment for debugging #define mult_tm 4 #define mult_tn 4 #define mult_tp 4 */ //** Transpose // tile sizes #define trans_tile_r 4 #define trans_tile_c 32 // virtualization #define trans_stripe 8 /* small tiles, uncomment for debugging #define trans_tile_r 2 #define trans_tile_c 4 #define trans_stripe 2 */ //** Combination/Separation // threads per block for add/remove row/col kernels #define r0_tpb 64 /* * Global variables */ //** random generator stuff needs to be redone, non-reentrant unsigned rand_state = 7; /* * Kernel function prototypes */ // sigmoid function __device__ float phi(float v); // derivative of sigmoid function given phi(v) __device__ float phi_prime(float z); // addition kernel for small matrices __global__ void add_small_kern(float *a, float *b, float *c, unsigned n); // addition kernel for big matrices __global__ void add_big_kern(float *a, float *b, float *c, unsigned n, unsigned stride); // subtraction kernel for small matrices __global__ void sub_small_kern(float *a, float *b, float *c, unsigned n); // subtraction kernel for big matrices __global__ void sub_big_kern(float *a, float *b, float *c, unsigned n, unsigned stride); // matrix multiplication kernel __global__ void mult_kern(float *a, unsigned a_c, float *b, unsigned b_c, unsigned b_csub, float *c, unsigned c_c); // matrix multiplication plus component-wise function apply __global__ void mult_phi_kern(float *a, unsigned a_c, float *b, unsigned b_c, unsigned b_csub, float *c, unsigned c_c); // transpose kernel __global__ void trans_kern(float *a, float *b, unsigned nrow, unsigned astride, unsigned bstride); // square kernel for small matrices __global__ void sqr_small_kern(float *a, float *b, unsigned n); // square kernel for large matrices __global__ void sqr_big_kern(float *a, float *b, unsigned n, unsigned stride); // scalar multiply kernel for small matrices __global__ void scl_small_kern(float *a, float b, float *c, unsigned n); // scalar multiply kernel for big matrices __global__ void scl_big_kern(float *a, float b, float *c, unsigned n, unsigned stride); // scalar multiply & pointwise addition kernel for small matrices __global__ void scl_add_small_kern(float *a, float b, float *c, 
float *d, unsigned n); // scalar multiply & pointwise addition kernel for big matrices __global__ void scl_add_big_kern(float *a, float b, float *c, float *d, unsigned n, unsigned stride); // pointwise multiply kernel for small matrices __global__ void pmult_small_kern(float *a, float *b, float *c, unsigned n); // pointwise multiply kernel for big matrices __global__ void pmult_big_kern(float *a, float *b, float *c, unsigned n, unsigned stride); // __global__ void phi_prime_small_kern(float *a, float *b, unsigned n); // __global__ void phi_prime_big_kern(float *a, float *b, unsigned n, unsigned stride); // __global__ void delta_small_kern(float *a, float *b, float *c, float denom, unsigned n); // __global__ void delta_big_kern(float *a, float *b, float *c, float denom, unsigned n, unsigned stride); // remove last row __global__ void r0_kern(float *m, unsigned nrow, unsigned ncol); // add row of ones kernel __global__ void r1_kern(float *m, unsigned nrow, unsigned ncol); // __global__ void c0v_kern(float *a, float *b, unsigned nrow, unsigned stride); // __global__ void cv_kern(float *a, float *b, unsigned nrow, unsigned stride); // zero out all values in m for small matrices __global__ void zero_small_kern(float *m, unsigned n); // zero out all values in m for big matrices __global__ void zero_big_kern(float *m, unsigned n, unsigned stride); /* * Kernel function bodies */ // sigmoid function __device__ float phi(float v) { // recommended by Lecun, find citation!! return 1.7159f * tanh( (2.0f/3.0f) * v ); } // derivative of sigmoid function given phi(v) __device__ float phi_prime(float z) { return (2.0f/3.0f) * (1.7159f - z*z); } // addition kernel for small matrices __global__ void add_small_kern(float *a, float *b, float *c, unsigned n) { // unique id for each thread 0, ..., (nthreads-1) const unsigned id = threadIdx.x + blockIdx.x * blockDim.x; // if inside matrix if (id < n) // sum one value a[id] = b[id] + c[id]; } // addition kernel for big matrices __global__ void add_big_kern(float *a, float *b, float *c, unsigned n, unsigned stride) { unsigned i; // unique id for each block, strided according to stripe size const unsigned block_index = blockIdx.x + blockIdx.y * gridDim.x * add_big_stripe; // unique id for each thread const unsigned id = threadIdx.x + block_index * blockDim.x; // each thread sums down a column stripe times #pragma unroll for (i = id; i < id+add_big_stripe*stride; i += stride) if (i < n) // if inside matrix a[i] = b[i] + c[i]; // sum value } // subtraction kernel for small matrices __global__ void sub_small_kern(float *a, float *b, float *c, unsigned n) { // unique id for each thread 0, ..., (nthreads-1) const unsigned id = threadIdx.x + blockIdx.x * blockDim.x; // if inside matrix if (id < n) // subtract one value a[id] = b[id] - c[id]; } // subtraction kernel for big matrices __global__ void sub_big_kern(float *a, float *b, float *c, unsigned n, unsigned stride) { unsigned i; // unique id for each block, strided according to stripe size const unsigned block_index = blockIdx.x + blockIdx.y * gridDim.x * add_big_stripe; // unique id for each thread const unsigned id = threadIdx.x + block_index * blockDim.x; // each thread sums down a column stripe times #pragma unroll for (i = id; i < id+add_big_stripe*stride; i += stride) if (i < n) // if inside matrix a[i] = b[i] - c[i]; // subtract value } // matrix multiplication kernel // expects padded to tile sizes __global__ void mult_kern(float *a, unsigned a_c, float *b, unsigned b_c, unsigned b_csub, float *c, unsigned c_c) 
{ // loop counter unsigned i, j; // starting positions in respective tiles float *acur, *bcur, *ccur; { const unsigned block_pos_r = blockIdx.y*mult_tm; const unsigned block_pos_c = blockIdx.x*2*mult_tp; acur = a + block_pos_c + block_pos_r * a_c + threadIdx.x + threadIdx.y * mult_tn; bcur = b + (block_pos_r + threadIdx.y) * b_c + threadIdx.x; ccur = c + block_pos_c + threadIdx.x + threadIdx.y * mult_tn; } // end of last tile in b const float *bend = bcur + b_csub; // current a values // two way virtualization float aval_v1[mult_tm]; float aval_v2[mult_tm]; // initialize a values to zero #pragma unroll for (i = 0; i < mult_tm; ++i) { aval_v1[i] = 0.0f; aval_v2[i] = 0.0f; } // for each tile read from b do { // allocate shared space for tile in b __shared__ float bs[mult_tn][mult_tm+1]; // put tile from b into shared memory #pragma unroll for (i = 0; i < mult_tm; i += (mult_tp/mult_tn)) bs[threadIdx.x][threadIdx.y+i] = bcur[i*b_c]; // move b's tile across bcur += mult_tn; // synchronize to ensure bll elements are read __syncthreads(); // for each row in tile of c #pragma unroll for (i = 0; i < mult_tn; ++i) { // do mults and adds #pragma unroll for (j = 0; j < mult_tm; ++j) { aval_v1[j] += bs[i][j] * ccur[0]; aval_v2[j] += bs[i][j] * ccur[mult_tp]; } ccur += c_c; } __syncthreads(); } while (bcur < bend); // until last tile in b // copy results to global memory #pragma unroll for (i = 0; i < mult_tm; ++i, acur += a_c) { acur[0] = aval_v1[i]; acur[mult_tp] = aval_v2[i]; } } // matrix multiplication plus component-wise function apply __global__ void mult_phi_kern(float *a, unsigned a_c, float *b, unsigned b_c, unsigned b_csub, float *c, unsigned c_c) { // loop counter unsigned i, j; // starting positions in respective tiles float *acur, *bcur, *ccur; { const unsigned block_pos_r = blockIdx.y*mult_tm; const unsigned block_pos_c = blockIdx.x*2*mult_tp; acur = a + block_pos_c + block_pos_r * a_c + threadIdx.x + threadIdx.y * mult_tn; bcur = b + (block_pos_r + threadIdx.y) * b_c + threadIdx.x; ccur = c + block_pos_c + threadIdx.x + threadIdx.y * mult_tn; } // end of last tile in b const float *bend = bcur + b_csub; // current a values // two way virtualization float aval_v1[mult_tm]; float aval_v2[mult_tm]; // initialize a values to zero #pragma unroll for (i = 0; i < mult_tm; ++i) { aval_v1[i] = 0.0f; aval_v2[i] = 0.0f; } // for each tile read from b do { // allocate shared space for tile in b __shared__ float bs[mult_tn][mult_tm+1]; // put tile from b into shared memory #pragma unroll for (i = 0; i < mult_tm; i += (mult_tp/mult_tn)) bs[threadIdx.x][threadIdx.y+i] = bcur[i*b_c]; // move b's tile across bcur += mult_tn; // synchronize to ensure bll elements are read __syncthreads(); // for each row in tile of c #pragma unroll for (i = 0; i < mult_tn; ++i) { // do mults and adds #pragma unroll for (j = 0; j < mult_tm; ++j) { aval_v1[j] += bs[i][j] * ccur[0]; aval_v2[j] += bs[i][j] * ccur[mult_tp]; } ccur += c_c; } __syncthreads(); } while (bcur < bend); // until last tile in b // copy results to global memory #pragma unroll for (i = 0; i < mult_tm; ++i, acur += a_c) { acur[0] = phi(aval_v1[i]); acur[mult_tp] = phi(aval_v2[i]); } } // transpose kernel // expects padded to tile size __global__ void trans_kern(float *a, float *b, unsigned nrow, unsigned astride, unsigned bstride) { unsigned i, blockIdx_x, blockIdx_y; // diagonal reordering to prevent partition camping // borrowed from NVIDIA CUDA SDK, Thanks! 
if (nrow == astride) { blockIdx_y = blockIdx.x; blockIdx_x = (blockIdx.x+blockIdx.y)%gridDim.x; } else { const unsigned bid = blockIdx.x + gridDim.x*blockIdx.y; blockIdx_y = bid%gridDim.y; blockIdx_x = ((bid/gridDim.y)+blockIdx_y)%gridDim.x; } // rows handled per block after striping const unsigned tile_r_stripe = trans_tile_r * trans_stripe; const unsigned tid_y_stripe = threadIdx.y * trans_stripe; // starting row and col in a const unsigned block_row = blockIdx_y * tile_r_stripe; const unsigned block_col = blockIdx_x * trans_tile_c; // thread's row and col in b unsigned row = block_col + tid_y_stripe; unsigned col = block_row + threadIdx.x; // perform tile transpose in shared memory __shared__ float tile[trans_tile_c][tile_r_stripe+1]; unsigned base = row*bstride + col; // read transposed values in from b #pragma unroll for (i = 0; i < trans_stripe; ++i) tile[threadIdx.x][tid_y_stripe+i] = b[base+i*bstride]; // wait for all threads to finish reading into shared mem __syncthreads(); // thread's row and col in a row = block_row + tid_y_stripe; col = block_col + threadIdx.x; base = row*astride + col; // write tile into a #pragma unroll for (i = 0; i < trans_stripe; ++i) a[base+i*astride] = tile[tid_y_stripe+i][threadIdx.x]; } // square kernel for small matrices __global__ void sqr_small_kern(float *a, float *b, unsigned n) { // unique id for each thread 0, ..., (nthreads-1) const unsigned id = threadIdx.x + blockIdx.x * blockDim.x; // if inside matrix if (id < n) { // square single value float bval = b[id]; a[id] = bval*bval; } } // square kernel for large matrices __global__ void sqr_big_kern(float *a, float *b, unsigned n, unsigned stride) { unsigned i; // unique id for each block, strided according to stripe size const unsigned block_index = blockIdx.x + blockIdx.y * gridDim.x * add_big_stripe; // unique id for each thread const unsigned id = threadIdx.x + block_index * blockDim.x; // each thread operates down a column stripe times #pragma unroll for (i = id; i < id+add_big_stripe*stride; i += stride) if (i < n) { float bval = b[i]; a[i] = bval*bval; } } // scalar multiply kernel for small matrices __global__ void scl_small_kern(float *a, float b, float *c, unsigned n) { // unique id for each thread 0, ..., (nthreads-1) const unsigned id = threadIdx.x + blockIdx.x * blockDim.x; // if inside matrix if (id < n) a[id] = b*c[id]; } // scalar multiply kernel for big matrices __global__ void scl_big_kern(float *a, float b, float *c, unsigned n, unsigned stride) { unsigned i; // unique id for each block, strided according to stripe size const unsigned block_index = blockIdx.x + blockIdx.y * gridDim.x * add_big_stripe; // unique id for each thread const unsigned id = threadIdx.x + block_index * blockDim.x; // each thread operates down a column stripe times #pragma unroll for (i = id; i < id+add_big_stripe*stride; i += stride) if (i < n) a[i] = b*c[i]; } // scalar multiply & pointwise addition kernel for small matrices __global__ void scl_add_small_kern(float *a, float b, float *c, float *d, unsigned n) { // unique id for each thread 0, ..., (nthreads-1) const unsigned id = threadIdx.x + blockIdx.x * blockDim.x; // if inside matrix if (id < n) a[id] = b*c[id]+d[id]; } // scalar multiply & pointwise addition kernel for big matrices __global__ void scl_add_big_kern(float *a, float b, float *c, float *d, unsigned n, unsigned stride) { unsigned i; // unique id for each block, strided according to stripe size const unsigned block_index = blockIdx.x + blockIdx.y * gridDim.x * add_big_stripe; // unique id for each thread const unsigned id
= threadIdx.x + block_index * blockDim.x; // each thread operates down a column stripe times #pragma unroll for (i = id; i < id+add_big_stripe*stride; i += stride) if (i < n) a[i] = b*c[i]+d[i]; } // pointwise multiply kernel for small matrices __global__ void pmult_small_kern(float *a, float *b, float *c, unsigned n) { // unique id for each thread 0, ..., (nthreads-1) const unsigned id = threadIdx.x + blockIdx.x * blockDim.x; // if inside matrix if (id < n) // mult one value a[id] = b[id] * c[id]; } // pointwise multiply kernel for big matrices __global__ void pmult_big_kern(float *a, float *b, float *c, unsigned n, unsigned stride) { unsigned i; // unique id for each block, strided according to stripe size const unsigned block_index = blockIdx.x + blockIdx.y * gridDim.x * add_big_stripe; // unique id for each thread const unsigned id = threadIdx.x + block_index * blockDim.x; // each thread mults down a column stripe times #pragma unroll for (i = id; i < id+add_big_stripe*stride; i += stride) if (i < n) // if inside matrix a[i] = b[i] * c[i]; // mult value } // pointwise phi_prime kernel for small matrices __global__ void phi_prime_small_kern(float *a, float *b, unsigned n) { // unique id for each thread 0, ..., (nthreads-1) const unsigned id = threadIdx.x + blockIdx.x * blockDim.x; // if inside matrix if (id < n) // apply phi_prime to one value a[id] = phi_prime(b[id]); } // pointwise phi_prime kernel for big matrices __global__ void phi_prime_big_kern(float *a, float *b, unsigned n, unsigned stride) { unsigned i; // unique id for each block, strided according to stripe size const unsigned block_index = blockIdx.x + blockIdx.y * gridDim.x * add_big_stripe; // unique id for each thread const unsigned id = threadIdx.x + block_index * blockDim.x; // each thread applies down a column stripe times #pragma unroll for (i = id; i < id+add_big_stripe*stride; i += stride) if (i < n) // if inside matrix a[i] = phi_prime(b[i]); } // output delta kernel for small matrices, a = (b - c) * denom __global__ void delta_small_kern(float *a, float *b, float *c, float denom, unsigned n) { // unique id for each thread 0, ..., (nthreads-1) const unsigned id = threadIdx.x + blockIdx.x * blockDim.x; // if inside matrix if (id < n) a[id] = (b[id] - c[id]) * denom; } // output delta kernel for big matrices, a = (b - c) * denom __global__ void delta_big_kern(float *a, float *b, float *c, float denom, unsigned n, unsigned stride) { unsigned i; // unique id for each block, strided according to stripe size const unsigned block_index = blockIdx.x + blockIdx.y * gridDim.x * add_big_stripe; // unique id for each thread const unsigned id = threadIdx.x + block_index * blockDim.x; // each thread operates down a column stripe times #pragma unroll for (i = id; i < id+add_big_stripe*stride; i += stride) if (i < n) // if inside matrix a[i] = (b[i] - c[i]) * denom; } // remove last row kernel // m should point to beginning of last row __global__ void r0_kern(float *m, unsigned ncol) { // unique id for each thread 0, ..., (nthreads-1) const unsigned id = threadIdx.x + blockIdx.x * blockDim.x; // if not past last col if (id < ncol) // set a single value to zero m[id] = 0.0f; } // add row of ones kernel // m should point to first non-existent row __global__ void r1_kern(float *m, unsigned ncol) { // unique id for each thread 0, ..., (nthreads-1) const unsigned id = threadIdx.x + blockIdx.x * blockDim.x; // if not past last col if (id < ncol) // set a single value to one m[id] = 1.0f; } // remove and save last col kernel, a should point to the top of the col, saved values go into b __global__ void c0v_kern(float *a, float *b, unsigned nrow, unsigned stride) { const unsigned id = threadIdx.x + blockIdx.x * blockDim.x; if (id < nrow) { float *aval = a + stride * id; b[id] = *aval; *aval = 0.0f; } } // restore saved col kernel, writes values from b back down the col starting at a __global__ void cv_kern(float *a, float *b, unsigned nrow, unsigned stride) { const unsigned
id = threadIdx.x + blockIdx.x * blockDim.x; if (id < nrow) a[stride * id] = b[id]; } // zero out all values in m for small matrices __global__ void zero_small_kern(float *m, unsigned n) { // unique id for each thread 0, ..., (nthreads-1) const unsigned id = threadIdx.x + blockIdx.x * blockDim.x; // if inside matrix if (id < n) // write zero m[id] = 0.0f; } // zero out all values in m for big matrices __global__ void zero_big_kern(float *m, unsigned n, unsigned stride) { unsigned i; // unique id for each block, strided according to stripe size const unsigned block_index = blockIdx.x + blockIdx.y * gridDim.x * add_big_stripe; // unique id for each thread const unsigned id = threadIdx.x + block_index * blockDim.x; // each thread operates down a column stripe times #pragma unroll for (i = id; i < id+add_big_stripe*stride; i += stride) if (i < n) // write zero m[i] = 0.0f; } /* * Internal function prototypes */ // zero out all values on cpu, does not set sync state! void zero_cpu(matrix m); // zero out all values on gpu, does not set sync state! void zero_gpu(matrix m); /* * Internal function bodies */ // zero out all values on cpu, does not set sync state! void zero_cpu(matrix m) { memset(m.cpu_data, '\0', (m.rstride)*(m.cstride)*sizeof(float)); } // zero out all values on gpu, does not set sync state! void zero_gpu(matrix m) { const unsigned row = m.rstride; const unsigned col = m.cstride; // if matrix dimensions are smaller than limits if ((row < add_rlim) || (col < add_clim)) { // set threads per block as parameterized const dim3 block_size(add_small_tpb, 1, 1); // treat like vector, num blocks is n / tpb const dim3 grid_size((add_small_tpb + row*col - 1) / add_small_tpb, 1, 1); // call small zero kernel hipLaunchKernelGGL(( zero_small_kern), dim3(grid_size), dim3(block_size), 0, 0, m.gpu_data, row*col); errcheck_gpu(); } else { // set threads per block as parameterized const dim3 block_size(add_big_tpb, 1, 1); // across rows we have ncol / tpb blocks const unsigned grid_size_x = (add_big_tpb + col - 1) / add_big_tpb; // down cols we have nrow / stripe blocks const unsigned grid_size_y = (add_big_stripe + row - 1) / add_big_stripe; const dim3 grid_size(grid_size_x, grid_size_y, 1); // call big zero kernel hipLaunchKernelGGL(( zero_big_kern), dim3(grid_size), dim3(block_size), 0, 0, m.gpu_data, row*col, col); errcheck_gpu(); } } /* * External function bodies */ //** Initialization & Destruction // initialize matrix m of size r by c extern "C" void matrix_init(matrix *m, unsigned r, unsigned c) { // set matrix dimensions m->r = r; m->c = c; // leave extra room for zero padding to make multiple of tile sizes // always leave an extra row & col for bias terms m->rstride = r + (80 - (r % 80)); // lcm(mult_tn,mult_tm) m->cstride = c + (128 - (c % 128)); // lcm(mult_tn,2*mult_tp) /* m->rstride = r + (640 - (r % 640)); m->cstride = c + (640 - (c % 640)); */ /* uncomment to debug without extra row & col of padding if ((r % 80) == 0) m->rstride = r; else m->rstride = r + (80 - (r % 80)); if ((c % 128) == 0) m->cstride = c; else m->cstride = c + (128 - (c % 128)); */ /* uncomment to debug with small tiles 4,4,4 m->rstride = r + (8 - (r % 8)); m->cstride = c + (8 - (c % 8)); */ if (DEBUG > 1) { printf("matrix size: %d %d\n", m->r, m->c); printf("with padding: %d %d\n", m->rstride, m->cstride); } // allocate space for sync state // done dynamically so we can pass copies of matrices // and utilize them without screwing up state m->sync = (matrix_sync_state*)malloc(sizeof(matrix_sync_state)); if (m->sync 
== NULL) errcheck_cpu(); // set initial sync to cpu *(m->sync) = matrix_sync_cpu; // allocate space for matrix on gpu hipMalloc((void**)&(m->gpu_data), (m->rstride)*(m->cstride)*sizeof(float)); errcheck_gpu(); // allocate space for last column holder hipMalloc((void**)&(m->cv), (m->r)*sizeof(float)); errcheck_gpu(); // allocate space for matrix on cpu m->cpu_data = (float*)malloc((m->rstride)*(m->cstride)*sizeof(float)); if (m->cpu_data == NULL) errcheck_cpu(); // zero out all data for safe padding zero_cpu(*m); zero_gpu(*m); } // initialize matrix m from ascii table void matrix_init_file(matrix *m, char *file_name) { // will need to do this dynamically if // we ever want to handle big tables!!! float table_data[line_buff_size][line_buff_size]; unsigned r = 0, c = 0; // open file & setup input buffer FILE *table = fopen(file_name, "r"); char line_buffer[line_buff_size]; // check if we were even able to open table file if (file_name == NULL) errcheck_cpu(); // buckle-up, don't drink and code memset(*table_data, '\0', line_buff_size*line_buff_size*sizeof(float)); // for each line in table - row while (fgets(line_buffer, line_buff_size, table)) { if (DEBUG > 5) printf("line buffer: %s\n", line_buffer); // set up string tokenizer on input buffer char *cur_val = strtok(line_buffer, " "); // Note reentrant or thread safe!! // don't increment num rows on blank line if (cur_val != NULL) { c = 0; // new row, reset col counter // for each token - col while (cur_val != NULL) { // convert from char to float table_data[r][c] = atof(cur_val); if (DEBUG > 5) printf("converting %d %d %f\n", r, c, table_data[r][c]); // get next token cur_val = strtok(NULL, " "); ++c; // increment num cols } ++r; // increment num rows } } // close file descriptor fclose(table); // initialize m matrix_init(m, r, c); // setup for data_at unsigned mr, mc; float *data = m->cpu_data; const unsigned stride = m->cstride; // loop through collected data and put into m for (mr = 0; mr < r; ++mr) for (mc = 0; mc < c; ++mc) data_at(mr,mc) = table_data[mr][mc]; } // load zeros into matrix m extern "C" void matrix_load_zero(matrix m) { float *data = m.cpu_data; const float *data_end = data + (m.rstride*m.cstride); // trashing, set sync set to cpu *(m.sync) = matrix_sync_cpu; // loop through all values and set to zero // use memset? while (data < data_end) *(data++) = 0.0f; } // load values from an array extern "C" void matrix_load_array(matrix m, float *v) { unsigned r, c, i = 0; // set data and stride for data_at float *data = m.cpu_data; const unsigned stride = m.cstride; *(m.sync) = matrix_sync_cpu; matrix_load_zero(m); for (r = 0; r < m.r; ++r) for (c = 0; c < m.c; ++c) data_at(r,c) = v[i++]; } // load values from the random uniform distribution extern "C" void matrix_load_runif(matrix m, float min, float max) { unsigned r, c; // set data and stride for data_at float *data = m.cpu_data; const unsigned stride = m.cstride; const float range = max - min; *(m.sync) = matrix_sync_cpu; matrix_load_zero(m); // needs work to be multithreaded! // should prolly use GPU?! 
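// (note: rand_state is a single global seed defined above, so the fill is reproducible for a fixed call order, but rand_r on shared state is not reentrant across threads)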
// set each non-padding value from random uniform for (r = 0; r < m.r; ++r) for (c = 0; c < m.c; ++c) data_at(r,c) = rand_r(&rand_state) * range / RAND_MAX + min; } extern "C" void matrix_load_testa(matrix m, unsigned n) { unsigned r, c; // set data and stride for data_at float *data = m.cpu_data; const unsigned stride = m.cstride; *(m.sync) = matrix_sync_cpu; matrix_load_zero(m); #pragma omp parallel for private(c) for (r = 0; r < m.r; ++r) for (c = 0; c < m.c; ++c) //data_at(r,c) = (float)n * (float)r*c; data_at(r,c) = (float)n; } extern "C" void matrix_load_testb(matrix m) { unsigned r, c; // set data and stride for data_at float *data = m.cpu_data; const unsigned stride = m.cstride; *(m.sync) = matrix_sync_cpu; matrix_load_zero(m); #pragma omp parallel for private(c) for (r = 0; r < m.r; ++r) for (c = 0; c < m.c; ++c) data_at(r,c) = (float)r; //data_at(r,c) = 1.0f; } extern "C" void matrix_load_testc(matrix m) { unsigned r, c; // set data and stride for data_at float *data = m.cpu_data; const unsigned stride = m.cstride; *(m.sync) = matrix_sync_cpu; matrix_load_zero(m); #pragma omp parallel for private(c) for (r = 0; r < m.r; ++r) for (c = 0; c < m.c; ++c) //data_at(r,c) = (float)c; data_at(r,c) = 1.0f; } // copy values to an array extern "C" void matrix_unload_array(matrix m, float *v) { unsigned r, c, i = 0; float *data = m.cpu_data; const unsigned stride = m.cstride; matrix_sync_to_cpu(m); for (r = 0; r < m.r; ++r) for (c = 0; c < m.c; ++c) v[i++] = data_at(r,c); } // write values to a file extern "C" void matrix_unload_file(matrix m, char *file_name) { unsigned r, c; float *data = m.cpu_data; const unsigned stride = m.cstride; // open file for writing FILE *table = fopen(file_name, "w"); // check if we were even able to open table file if (file_name == NULL) errcheck_cpu(); matrix_sync_to_cpu(m); for (r = 0; r < m.r; ++r) { for (c = 0; c < m.c-1; ++c) fprintf(table, "%.16f ", data_at(r,c)); fprintf(table, "%.16f\n", data_at(r,c)); } fclose(table); } // destroy matrix m extern "C" void matrix_dest(matrix *m) { free(m->cpu_data); // free matrix on cpu free(m->sync); // free sync state hipFree(m->gpu_data); // free matrix on gpu hipFree(m->cv); // free col holder // set everything to zero for safety m->r = 0; m->c = 0; m->rstride = 0; m->cstride = 0; m->sync = NULL; m->cpu_data = NULL; m->gpu_data = NULL; m->cv = NULL; } //** cpu/gpu synchronization // ensure current copy of matrix is on cpu void matrix_sync_to_cpu(matrix m) { // if not already on cpu if (*(m.sync) != matrix_sync_cpu) { // copy from device memory to host memory hipMemcpy(m.cpu_data, m.gpu_data, sizeof(float)*(m.rstride)*(m.cstride), hipMemcpyDeviceToHost); errcheck_gpu(); // set sync state to cpu *(m.sync) = matrix_sync_cpu; } } // ensure current copy of matrix is on gpu void matrix_sync_to_gpu(matrix m) { // if not already on gpu if (*(m.sync) != matrix_sync_gpu) { // copy from host memory to device memory hipMemcpy(m.gpu_data, m.cpu_data, sizeof(float)*(m.rstride)*(m.cstride), hipMemcpyHostToDevice); errcheck_gpu(); // set sync state to gpu *(m.sync) = matrix_sync_gpu; } } // wait for any gpu kernels to finish void matrix_wait() { hipDeviceSynchronize(); errcheck_gpu(); } //** Access // return pointer to cpu value at row r and col c float *matrix_at(matrix m, unsigned r, unsigned c) { // check not out of bounds! 
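// (r and c are currently not range checked; callers must keep r < m.r and c < m.c)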
matrix_sync_to_cpu(m); return (m.cpu_data)+(r*m.cstride+c); } //** Addition/Subtraction // a = b + c extern "C" void matrix_add(matrix a, matrix b, matrix c) { // sync matrices to gpu *(a.sync) = matrix_sync_gpu; // trashing, just set matrix_sync_to_gpu(b); matrix_sync_to_gpu(c); #ifndef NO_ERRCHECK if ( (a.r != b.r) || (b.r != c.r) ) { fprintf(stderr, __FILE__ " %d: " "Rows don't match for addition!\n", __LINE__); exit(CPU_ERROR); } if ( (a.c != b.c) || (b.c != c.c) ) { fprintf(stderr, __FILE__ " %d: " "Cols don't match for addition!\n", __LINE__); exit(CPU_ERROR); } #endif const unsigned row = b.r; const unsigned col = b.cstride; // if matrix dimensions are smaller than limits if ((row < add_rlim) || (col < add_clim)) { // set threads per block as parameterized const dim3 block_size(add_small_tpb, 1, 1); // treat like vector, num blocks is n / tpb const dim3 grid_size((add_small_tpb + row*col - 1) / add_small_tpb, 1, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } hipLaunchKernelGGL(( add_small_kern), dim3(grid_size), dim3(block_size), 0, 0, a.gpu_data, b.gpu_data, c.gpu_data, row*col); errcheck_gpu(); } else { // set threads per block as parameterized const dim3 block_size(add_big_tpb, 1, 1); // across rows we have ncol / tpb blocks const unsigned grid_size_x = (add_big_tpb + col - 1) / add_big_tpb; // down cols we have nrow / stripe blocks const unsigned grid_size_y = (add_big_stripe + row - 1) / add_big_stripe; const dim3 grid_size(grid_size_x, grid_size_y, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } hipLaunchKernelGGL(( add_big_kern), dim3(grid_size), dim3(block_size), 0, 0, a.gpu_data, b.gpu_data, c.gpu_data, row*col, col); errcheck_gpu(); } } // a = b - c extern "C" void matrix_sub(matrix a, matrix b, matrix c) { // sync matrices to gpu *(a.sync) = matrix_sync_gpu; // trashing, just set matrix_sync_to_gpu(b); matrix_sync_to_gpu(c); #ifndef NO_ERRCHECK if ( (a.r != b.r) || (b.r != c.r) ) { fprintf(stderr, __FILE__ " %d: " "Rows don't match for subtraction!\n", __LINE__); exit(CPU_ERROR); } if ( (a.c != b.c) || (b.c != c.c) ) { fprintf(stderr, __FILE__ " %d: " "Cols don't match for subtraction!\n", __LINE__); exit(CPU_ERROR); } #endif const unsigned row = b.r; const unsigned col = b.cstride; // if matrix dimensions are smaller than limits if ((row < add_rlim) || (col < add_clim)) { // set threads per block as parameterized const dim3 block_size(add_small_tpb, 1, 1); // treat like vector, num blocks is n / tpb const dim3 grid_size((add_small_tpb + row*col - 1) / add_small_tpb, 1, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } hipLaunchKernelGGL(( sub_small_kern), dim3(grid_size), dim3(block_size), 0, 0, a.gpu_data, b.gpu_data, c.gpu_data, row*col); } else { // set threads per block as parameterized const dim3 block_size(add_big_tpb, 1, 1); // across rows we have ncol / tpb blocks const unsigned grid_size_x = (add_big_tpb + col - 1) / add_big_tpb; // down cols we have nrow / stripe blocks const unsigned grid_size_y = (add_big_stripe + row - 1) / add_big_stripe; const dim3 grid_size(grid_size_x, grid_size_y, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } hipLaunchKernelGGL(( sub_big_kern), dim3(grid_size), dim3(block_size), 0, 
0, a.gpu_data, b.gpu_data, c.gpu_data, row*col, col); errcheck_gpu(); } } //** Multiplication // a = b * c extern "C" void matrix_mult(matrix a, matrix b, matrix c) { *(a.sync) = matrix_sync_gpu; // trashing, just set matrix_sync_to_gpu(b); matrix_sync_to_gpu(c); #ifndef NO_ERRCHECK if ( (b.c != c.r) || (a.r != b.r) || (a.c != c.c) ) { fprintf(stderr, __FILE__ " %d: " "Dimensions don't match for multiplication!\n", __LINE__); exit(CPU_ERROR); } #endif const dim3 block_size(mult_tn, mult_tp/mult_tn, 1); // remember, x is col and y is row here const unsigned grid_size_r = (mult_tm + a.r - 1) / mult_tm; const unsigned grid_size_c = (2*mult_tp + a.c - 1) / (2*mult_tp); //const unsigned grid_size_r = a.rstride / mult_tm; //const unsigned grid_size_c = a.cstride / (2*mult_tp); const dim3 grid_size(grid_size_c, grid_size_r, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } hipLaunchKernelGGL(( mult_kern), dim3(grid_size), dim3(block_size), 0, 0, a.gpu_data, a.cstride, b.gpu_data, b.cstride, b.c, c.gpu_data, c.cstride); errcheck_gpu(); } // a = phi(b * c) extern "C" void matrix_mult_phi(matrix a, matrix b, matrix c) { *(a.sync) = matrix_sync_gpu; // trashing, just set matrix_sync_to_gpu(b); matrix_sync_to_gpu(c); #ifndef NO_ERRCHECK if ( (b.c != c.r) || (a.r != b.r) || (a.c != c.c) ) { fprintf(stderr, __FILE__ " %d: " "Dimensions don't match for mult-apply!\n", __LINE__); exit(CPU_ERROR); } #endif const dim3 block_size(mult_tn, mult_tp/mult_tn, 1); // remember, x is col and y is row here const unsigned grid_size_r = (mult_tm + a.r - 1) / mult_tm; const unsigned grid_size_c = (2*mult_tp + a.c - 1) / (2*mult_tp); //const unsigned grid_size_r = a.rstride / mult_tm; //const unsigned grid_size_c = a.cstride / (2*mult_tp); const dim3 grid_size(grid_size_c, grid_size_r, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } hipLaunchKernelGGL(( mult_phi_kern), dim3(grid_size), dim3(block_size), 0, 0, a.gpu_data, a.cstride, b.gpu_data, b.cstride, b.c, c.gpu_data, c.cstride); errcheck_gpu(); } //** Transpose // a = b^T // a = b^T extern "C" void matrix_trans(matrix a, matrix b) { *(a.sync) = matrix_sync_gpu; // trashing, just set matrix_sync_to_gpu(b); #ifndef NO_ERRCHECK if ( (a.r != b.c) || (a.c != b.r) ) { fprintf(stderr, __FILE__ " %d: " "Source and destination dimensions don't match for transpose!\n", __LINE__); exit(CPU_ERROR); } #endif // divide into grid of trans_tile sized block const dim3 block_size(trans_tile_c, trans_tile_r, 1); const unsigned grid_size_x = (trans_tile_c + a.c - 1) / trans_tile_c; const unsigned grid_size_y = ((trans_tile_r * trans_stripe) + a.r - 1) / (trans_tile_r * trans_stripe); const dim3 grid_size(grid_size_x, grid_size_y, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } // call transpose kernel hipLaunchKernelGGL(( trans_kern), dim3(grid_size), dim3(block_size), 0, 0, a.gpu_data, b.gpu_data, a.r, a.cstride, b.cstride); errcheck_gpu(); } //** Pointwise miscellaneous // a = b^2 extern "C" void matrix_sqr(matrix a, matrix b) { // sync matrices to gpu *(a.sync) = matrix_sync_gpu; // trashing, just set matrix_sync_to_gpu(b); #ifndef NO_ERRCHECK if ( (a.r != b.r) || (a.c != b.c) ) { fprintf(stderr, __FILE__ " %d: " "Source and destination dimensions don't match for square!\n", __LINE__); exit(CPU_ERROR); } #endif const 
unsigned row = b.r; const unsigned col = b.cstride; // if matrix dimensions are smaller than limits if ((row < add_rlim) || (col < add_clim)) { // set threads per block as parameterized const dim3 block_size(add_small_tpb, 1, 1); // treat like vector, num blocks is n / tpb const dim3 grid_size((add_small_tpb + row*col - 1) / add_small_tpb, 1, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } hipLaunchKernelGGL(( sqr_small_kern), dim3(grid_size), dim3(block_size), 0, 0, a.gpu_data, b.gpu_data, row*col); errcheck_gpu(); } else { // set threads per block as parameterized const dim3 block_size(add_big_tpb, 1, 1); // across rows we have ncol / tpb blocks const unsigned grid_size_x = (add_big_tpb + col - 1) / add_big_tpb; // down cols we have nrow / stripe blocks const unsigned grid_size_y = (add_big_stripe + row - 1) / add_big_stripe; const dim3 grid_size(grid_size_x, grid_size_y, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } hipLaunchKernelGGL(( sqr_big_kern), dim3(grid_size), dim3(block_size), 0, 0, a.gpu_data, b.gpu_data, row*col, col); errcheck_gpu(); } } // scalar multiplication a = b*c extern "C" void matrix_scl(matrix a, float b, matrix c) { *(a.sync) = matrix_sync_gpu; // trashing, just set matrix_sync_to_gpu(c); #ifndef NO_ERRCHECK if ( (a.r != c.r) || (a.c != c.c) ) { fprintf(stderr, __FILE__ " %d: " "Source and destination dimensions don't match for scalar mult!\n", __LINE__); exit(CPU_ERROR); } #endif const unsigned row = c.r; const unsigned col = c.cstride; // if matrix dimensions are smaller than limits if ((row < add_rlim) || (col < add_clim)) { // set threads per block as parameterized const dim3 block_size(add_small_tpb, 1, 1); // treat like vector, num blocks is n / tpb const dim3 grid_size((add_small_tpb + row*col - 1) / add_small_tpb, 1, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } hipLaunchKernelGGL(( scl_small_kern), dim3(grid_size), dim3(block_size), 0, 0, a.gpu_data, b, c.gpu_data, row*col); errcheck_gpu(); } else { // set threads per block as parameterized const dim3 block_size(add_big_tpb, 1, 1); // across rows we have ncol / tpb blocks const unsigned grid_size_x = (add_big_tpb + col - 1) / add_big_tpb; // down cols we have nrow / stripe blocks const unsigned grid_size_y = (add_big_stripe + row - 1) / add_big_stripe; const dim3 grid_size(grid_size_x, grid_size_y, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } hipLaunchKernelGGL(( scl_big_kern), dim3(grid_size), dim3(block_size), 0, 0, a.gpu_data, b, c.gpu_data, row*col, col); errcheck_gpu(); } } // scalar multiplication & pointwise addition a = b.*c+d void matrix_scl_add(matrix a, float b, matrix c, matrix d) { *(a.sync) = matrix_sync_gpu; // trashing, just set matrix_sync_to_gpu(c); matrix_sync_to_gpu(d); #ifndef NO_ERRCHECK if ( (a.r != c.r) || (a.c != c.c) ) { fprintf(stderr, __FILE__ " %d: " "Source and destination dimensions don't match for scalar mult!\n", __LINE__); exit(CPU_ERROR); } if ( (a.r != d.r) || (a.c != d.c) ) { fprintf(stderr, __FILE__ " %d: " "Source and destination dimensions don't match for scalar mult!\n", __LINE__); exit(CPU_ERROR); } #endif const unsigned row = c.r; const unsigned col = c.cstride; // if matrix dimensions are smaller than 
limits if ((row < add_rlim) || (col < add_clim)) { // set threads per block as parameterized const dim3 block_size(add_small_tpb, 1, 1); // treat like vector, num blocks is n / tpb const dim3 grid_size((add_small_tpb + row*col - 1) / add_small_tpb, 1, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } hipLaunchKernelGGL(( scl_add_small_kern), dim3(grid_size), dim3(block_size), 0, 0, a.gpu_data, b, c.gpu_data, d.gpu_data, row*col); errcheck_gpu(); } else { // set threads per block as parameterized const dim3 block_size(add_big_tpb, 1, 1); // across rows we have ncol / tpb blocks const unsigned grid_size_x = (add_big_tpb + col - 1) / add_big_tpb; // down cols we have nrow / stripe blocks const unsigned grid_size_y = (add_big_stripe + row - 1) / add_big_stripe; const dim3 grid_size(grid_size_x, grid_size_y, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } hipLaunchKernelGGL(( scl_add_big_kern), dim3(grid_size), dim3(block_size), 0, 0, a.gpu_data, b, c.gpu_data, d.gpu_data, row*col, col); errcheck_gpu(); } } // pointwise multiplication a = b.*c extern "C" void matrix_pmult(matrix a, matrix b, matrix c) { *(a.sync) = matrix_sync_gpu; // trashing, just set matrix_sync_to_gpu(b); matrix_sync_to_gpu(c); #ifndef NO_ERRCHECK if ( (a.r != b.r) || (b.r != c.r) ) { fprintf(stderr, __FILE__ " %d: " "Rows don't match for pmult!\n", __LINE__); exit(CPU_ERROR); } if ( (a.c != b.c) || (b.c != c.c) ) { fprintf(stderr, __FILE__ " %d: " "Cols don't match for pmult!\n", __LINE__); exit(CPU_ERROR); } #endif const unsigned row = b.r; const unsigned col = b.cstride; // if matrix dimensions are smaller than limits if ((row < add_rlim) || (col < add_clim)) { // set threads per block as parameterized const dim3 block_size(add_small_tpb, 1, 1); // treat like vector, num blocks is n / tpb const dim3 grid_size((add_small_tpb + row*col - 1) / add_small_tpb, 1, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } hipLaunchKernelGGL(( pmult_small_kern), dim3(grid_size), dim3(block_size), 0, 0, a.gpu_data, b.gpu_data, c.gpu_data, row*col); errcheck_gpu(); } else { // set threads per block as parameterized const dim3 block_size(add_big_tpb, 1, 1); // across rows we have ncol / tpb blocks const unsigned grid_size_x = (add_big_tpb + col - 1) / add_big_tpb; // down cols we have nrow / stripe blocks const unsigned grid_size_y = (add_big_stripe + row - 1) / add_big_stripe; const dim3 grid_size(grid_size_x, grid_size_y, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } hipLaunchKernelGGL(( pmult_big_kern), dim3(grid_size), dim3(block_size), 0, 0, a.gpu_data, b.gpu_data, c.gpu_data, row*col, col); errcheck_gpu(); } } // extern "C" void matrix_phi_prime(matrix a, matrix b) { *(a.sync) = matrix_sync_gpu; // trashing, just set matrix_sync_to_gpu(b); #ifndef NO_ERRCHECK if ( (a.r != b.r) || (a.c != b.c) ) { fprintf(stderr, __FILE__ " %d: " "Source and destination dimensions don't match for phi_prime!\n", __LINE__); exit(CPU_ERROR); } #endif const unsigned row = b.r; const unsigned col = b.cstride; // if matrix dimensions are smaller than limits if ((row < add_rlim) || (col < add_clim)) { // set threads per block as parameterized const dim3 block_size(add_small_tpb, 1, 1); // treat like vector, 
num blocks is n / tpb const dim3 grid_size((add_small_tpb + row*col - 1) / add_small_tpb, 1, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } hipLaunchKernelGGL(( phi_prime_small_kern), dim3(grid_size), dim3(block_size), 0, 0, a.gpu_data, b.gpu_data, row*col); errcheck_gpu(); } else { // set threads per block as parameterized const dim3 block_size(add_big_tpb, 1, 1); // across rows we have ncol / tpb blocks const unsigned grid_size_x = (add_big_tpb + col - 1) / add_big_tpb; // down cols we have nrow / stripe blocks const unsigned grid_size_y = (add_big_stripe + row - 1) / add_big_stripe; const dim3 grid_size(grid_size_x, grid_size_y, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } hipLaunchKernelGGL(( phi_prime_big_kern), dim3(grid_size), dim3(block_size), 0, 0, a.gpu_data, b.gpu_data, row*col, col); errcheck_gpu(); } } // extern "C" void matrix_delta(matrix delta, matrix y, matrix g) { *(delta.sync) = matrix_sync_gpu; // trashing, just set matrix_sync_to_gpu(y); matrix_sync_to_gpu(g); #ifndef NO_ERRCHECK if ( (delta.r != y.r) || (y.r != g.r) ) { fprintf(stderr, __FILE__ " %d: " "Rows don't match for delta!\n", __LINE__); exit(CPU_ERROR); } if ( (delta.c != y.c) || (y.c != g.c) ) { fprintf(stderr, __FILE__ " %d: " "Cols don't match for delta!\n", __LINE__); exit(CPU_ERROR); } #endif const unsigned row = y.r; const unsigned col = y.cstride; // if matrix dimensions are smaller than limits if ((row < add_rlim) || (col < add_clim)) { // set threads per block as parameterized const dim3 block_size(add_small_tpb, 1, 1); // treat like vector, num blocks is n / tpb const dim3 grid_size((add_small_tpb + row*col - 1) / add_small_tpb, 1, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } hipLaunchKernelGGL(( delta_small_kern), dim3(grid_size), dim3(block_size), 0, 0, delta.gpu_data, y.gpu_data, g.gpu_data, 2.0f / (float)(g.r*g.c), row*col); errcheck_gpu(); } else { // set threads per block as parameterized const dim3 block_size(add_big_tpb, 1, 1); // across rows we have ncol / tpb blocks const unsigned grid_size_x = (add_big_tpb + col - 1) / add_big_tpb; // down cols we have nrow / stripe blocks const unsigned grid_size_y = (add_big_stripe + row - 1) / add_big_stripe; const dim3 grid_size(grid_size_x, grid_size_y, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } hipLaunchKernelGGL(( delta_big_kern), dim3(grid_size), dim3(block_size), 0, 0, delta.gpu_data, y.gpu_data, g.gpu_data, 2.0f / (float)(g.r*g.c), row*col, col); errcheck_gpu(); } } //** Combination/Separation // remove last row of m extern "C" void matrix_r0(matrix *m) { matrix_sync_to_gpu(*m); const dim3 block_size(r0_tpb, 1, 1); const dim3 grid_size((r0_tpb + m->c - 1) / r0_tpb, 1, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } // decriment number of rows --(m->r); // pass r0_kern gpu data beginning at last row hipLaunchKernelGGL(( r0_kern), dim3(grid_size), dim3(block_size), 0, 0, (m->gpu_data)+((m->r)*(m->cstride)), m->c); errcheck_gpu(); } // append a row of 1's to m // Note: we currently always leave enough padding so that we // can add one extra row. Can't do this more than once! 
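// One possible calling pattern for the bias-row helpers (illustrative sketch, not taken from the original code; x, w and y are placeholder matrices):
//   matrix_r1(&x);            // append a bias row of ones to the layer input
//   matrix_mult_phi(y, w, x); // y = phi(w * x), with w sized for the extra row
//   matrix_r0(&x);            // drop the bias row again before x is reused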
extern "C" void matrix_r1(matrix *m) { matrix_sync_to_gpu(*m); const dim3 block_size(r0_tpb, 1, 1); const dim3 grid_size((r0_tpb + m->c - 1) / r0_tpb, 1, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } // pass r1_kern gpu data beginning at beginning of row after end hipLaunchKernelGGL(( r1_kern), dim3(grid_size), dim3(block_size), 0, 0, (m->gpu_data)+((m->r)*(m->cstride)), m->c); errcheck_gpu(); // increment number of rows ++(m->r); } // remove and save last col of m extern "C" void matrix_c0v(matrix *m) { matrix_sync_to_gpu(*m); const dim3 block_size(r0_tpb, 1, 1); const dim3 grid_size((r0_tpb + m->r - 1) / r0_tpb, 1, 1); --(m->c); hipLaunchKernelGGL(( c0v_kern), dim3(grid_size), dim3(block_size), 0, 0, (m->gpu_data) + (m->c), m->cv, m->r, m->cstride); errcheck_gpu(); } // restore last col of m extern "C" void matrix_cv(matrix *m) { matrix_sync_to_gpu(*m); const dim3 block_size(r0_tpb, 1, 1); const dim3 grid_size((r0_tpb + m->r - 1) / r0_tpb, 1, 1); hipLaunchKernelGGL(( cv_kern), dim3(grid_size), dim3(block_size), 0, 0, (m->gpu_data) + (m->c), m->cv, m->r, m->cstride); errcheck_gpu(); ++(m->c); } //** Error Measurement // rmse between values of actual and approx extern "C" float matrix_rmse(matrix actual, matrix approx) { // if dims don't match throw error! // matrix_sync_to_cpu(actual); matrix_sync_to_cpu(approx); // unsigned r, c; const unsigned len = actual.r*actual.c; float *d1 = actual.cpu_data; float *d2 = approx.cpu_data; float err = 0.0f; // #pragma omp parallel for shared(err) private(c,d1,d2) for (r = 0; r < actual.r; ++r) for (c = 0; c < actual.c; ++c) { unsigned i = r*actual.cstride+c; err += (d1[i] - d2[i])*(d1[i] - d2[i]); } return sqrt(err/len); } // return maximum relative error between approx and actual extern "C" float matrix_relerr_max(matrix actual, matrix approx) { unsigned i; // if matrices are different sizes // then return -1.0f if ( (approx.r != actual.r) || (approx.c != actual.c) ) return -1.0f; // synchronize both matrices to cpu matrix_sync_to_cpu(approx); matrix_sync_to_cpu(actual); // figure relative error float max_err = 0.0f; for (i = 0; i < (approx.rstride)*(approx.cstride); ++i) { // needs work here !!! float actual_cur = actual.cpu_data[i]; if (fabs(actual_cur) > 1.0e-10f) { float rel_err = fabs(actual_cur - approx.cpu_data[i]) / actual_cur; if (rel_err > max_err) max_err = rel_err; } } return max_err; } //** Output // print m to standard out extern "C" void matrix_print(matrix m) { unsigned r, c; const unsigned stride = m.cstride; float *data = m.cpu_data; // sync matrix to cpu matrix_sync_to_cpu(m); // print each value to stdout for (r = 0; r < m.r; ++r) { for (c = 0; c < m.c; ++c) printf("%f ", data_at(r,c)); printf("\n"); } } // print m including padding to standard out extern "C" void matrix_print_padded(matrix m) { unsigned r, c; // set data and stride for data_at const unsigned stride = m.cstride; float *data = m.cpu_data; // sync matrix to cpu matrix_sync_to_cpu(m); // print each value to stdout for (r = 0; r < m.rstride; ++r) { for (c = 0; c < m.cstride; ++c) printf("%f ", data_at(r,c)); printf("\n"); } }
610074226d17c247ca4e59f60bd95047fbad3852.cu
/*********************************\ * Cuda Accelerated Matrix Library * * * * by * * Elliott Forney * * 3.9.2010 * \*********************************/ /* need to fix all kernels to handle matrices with diff strides or else ensure that all matrices will have same stride in r0,v1,c0v,cv */ /* * Libraries */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <omp.h> #include "errcheck.h" #include "matrix.h" /* * Macros */ //** Debugging level #define DEBUG 0 //** Initialization // max number of chars per line in input table #define line_buff_size 1024 //** Access // row major indexing // expects stride and data to be set #define data_at(r,c) data[r*stride+c] //** Addition/Subtraction // when rows/cols < rlim/clim use small kernel // must be greater than or equal to add_big_tpb #define add_rlim 512 #define add_clim 512 // threads per block for small addition kernel #define add_small_tpb 64 // threads per block for big addition kernel // this must be smaller than and divide // lcm(mult_tn,2*mult_tp) evenly #define add_big_tpb 128 // stripe size for big addition kernel #define add_big_stripe 4 //** Multiplication // tile sizes #define mult_tm 20 #define mult_tn 16 #define mult_tp 64 /* small mult tile sizes, uncomment for debugging #define mult_tm 4 #define mult_tn 4 #define mult_tp 4 */ //** Transpose // tile sizes #define trans_tile_r 4 #define trans_tile_c 32 // virtualization #define trans_stripe 8 /* small tiles, uncomment for debugging #define trans_tile_r 2 #define trans_tile_c 4 #define trans_stripe 2 */ //** Combination/Separation // threads per block for add/remove row/col kernels #define r0_tpb 64 /* * Global variables */ //** random generator stuff needs to be redone, non-reentrant unsigned rand_state = 7; /* * Kernel function prototypes */ // sigmoid function __device__ float phi(float v); // derivative of sigmoid function given phi(v) __device__ float phi_prime(float z); // addition kernel for small matrices __global__ void add_small_kern(float *a, float *b, float *c, unsigned n); // addition kernel for big matrices __global__ void add_big_kern(float *a, float *b, float *c, unsigned n, unsigned stride); // subtraction kernel for small matrices __global__ void sub_small_kern(float *a, float *b, float *c, unsigned n); // subtraction kernel for big matrices __global__ void sub_big_kern(float *a, float *b, float *c, unsigned n, unsigned stride); // matrix multiplication kernel __global__ void mult_kern(float *a, unsigned a_c, float *b, unsigned b_c, unsigned b_csub, float *c, unsigned c_c); // matrix multiplication plus component-wise function apply __global__ void mult_phi_kern(float *a, unsigned a_c, float *b, unsigned b_c, unsigned b_csub, float *c, unsigned c_c); // transpose kernel __global__ void trans_kern(float *a, float *b, unsigned nrow, unsigned astride, unsigned bstride); // square kernel for small matrices __global__ void sqr_small_kern(float *a, float *b, unsigned n); // square kernel for large matrices __global__ void sqr_big_kern(float *a, float *b, unsigned n, unsigned stride); // scalar multiply kernel for small matrices __global__ void scl_small_kern(float *a, float b, float *c, unsigned n); // scalar multiply kernel for big matrices __global__ void scl_big_kern(float *a, float b, float *c, unsigned n, unsigned stride); // scalar multiply & pointwise addition kernel for small matrices __global__ void scl_add_small_kern(float *a, float b, float *c, float *d, unsigned n); // scalar multiply & pointwise addition kernel for big matrices 
__global__ void scl_add_big_kern(float *a, float b, float *c, float *d, unsigned n, unsigned stride); // pointwise multiply kernel for small matrices __global__ void pmult_small_kern(float *a, float *b, float *c, unsigned n); // pointwise multiply kernel for big matrices __global__ void pmult_big_kern(float *a, float *b, float *c, unsigned n, unsigned stride); // __global__ void phi_prime_small_kern(float *a, float *b, unsigned n); // __global__ void phi_prime_big_kern(float *a, float *b, unsigned n, unsigned stride); // __global__ void delta_small_kern(float *a, float *b, float *c, float denom, unsigned n); // __global__ void delta_big_kern(float *a, float *b, float *c, float denom, unsigned n, unsigned stride); // remove last row __global__ void r0_kern(float *m, unsigned nrow, unsigned ncol); // add row of ones kernel __global__ void r1_kern(float *m, unsigned nrow, unsigned ncol); // __global__ void c0v_kern(float *a, float *b, unsigned nrow, unsigned stride); // __global__ void cv_kern(float *a, float *b, unsigned nrow, unsigned stride); // zero out all values in m for small matrices __global__ void zero_small_kern(float *m, unsigned n); // zero out all values in m for big matrices __global__ void zero_big_kern(float *m, unsigned n, unsigned stride); /* * Kernel function bodies */ // sigmoid function __device__ float phi(float v) { // recommended by Lecun, find citation!! return 1.7159f * tanh( (2.0f/3.0f) * v ); } // derivative of sigmoid function given phi(v) __device__ float phi_prime(float z) { return (2.0f/3.0f) * (1.7159f - z*z); } // addition kernel for small matrices __global__ void add_small_kern(float *a, float *b, float *c, unsigned n) { // unique id for each thread 0, ..., (nthreads-1) const unsigned id = threadIdx.x + blockIdx.x * blockDim.x; // if inside matrix if (id < n) // sum one value a[id] = b[id] + c[id]; } // addition kernel for big matrices __global__ void add_big_kern(float *a, float *b, float *c, unsigned n, unsigned stride) { unsigned i; // unique id for each block, strided according to stripe size const unsigned block_index = blockIdx.x + blockIdx.y * gridDim.x * add_big_stripe; // unique id for each thread const unsigned id = threadIdx.x + block_index * blockDim.x; // each thread sums down a column stripe times #pragma unroll for (i = id; i < id+add_big_stripe*stride; i += stride) if (i < n) // if inside matrix a[i] = b[i] + c[i]; // sum value } // subtraction kernel for small matrices __global__ void sub_small_kern(float *a, float *b, float *c, unsigned n) { // unique id for each thread 0, ..., (nthreads-1) const unsigned id = threadIdx.x + blockIdx.x * blockDim.x; // if inside matrix if (id < n) // subtract one value a[id] = b[id] - c[id]; } // subtraction kernel for big matrices __global__ void sub_big_kern(float *a, float *b, float *c, unsigned n, unsigned stride) { unsigned i; // unique id for each block, strided according to stripe size const unsigned block_index = blockIdx.x + blockIdx.y * gridDim.x * add_big_stripe; // unique id for each thread const unsigned id = threadIdx.x + block_index * blockDim.x; // each thread sums down a column stripe times #pragma unroll for (i = id; i < id+add_big_stripe*stride; i += stride) if (i < n) // if inside matrix a[i] = b[i] - c[i]; // subtract value } // matrix multiplication kernel // expects padded to tile sizes __global__ void mult_kern(float *a, unsigned a_c, float *b, unsigned b_c, unsigned b_csub, float *c, unsigned c_c) { // loop counter unsigned i, j; // starting positions in respective tiles float 
*acur, *bcur, *ccur; { const unsigned block_pos_r = blockIdx.y*mult_tm; const unsigned block_pos_c = blockIdx.x*2*mult_tp; acur = a + block_pos_c + block_pos_r * a_c + threadIdx.x + threadIdx.y * mult_tn; bcur = b + (block_pos_r + threadIdx.y) * b_c + threadIdx.x; ccur = c + block_pos_c + threadIdx.x + threadIdx.y * mult_tn; } // end of last tile in b const float *bend = bcur + b_csub; // current a values // two way virtualization float aval_v1[mult_tm]; float aval_v2[mult_tm]; // initialize a values to zero #pragma unroll for (i = 0; i < mult_tm; ++i) { aval_v1[i] = 0.0f; aval_v2[i] = 0.0f; } // for each tile read from b do { // allocate shared space for tile in b __shared__ float bs[mult_tn][mult_tm+1]; // put tile from b into shared memory #pragma unroll for (i = 0; i < mult_tm; i += (mult_tp/mult_tn)) bs[threadIdx.x][threadIdx.y+i] = bcur[i*b_c]; // move b's tile across bcur += mult_tn; // synchronize to ensure bll elements are read __syncthreads(); // for each row in tile of c #pragma unroll for (i = 0; i < mult_tn; ++i) { // do mults and adds #pragma unroll for (j = 0; j < mult_tm; ++j) { aval_v1[j] += bs[i][j] * ccur[0]; aval_v2[j] += bs[i][j] * ccur[mult_tp]; } ccur += c_c; } __syncthreads(); } while (bcur < bend); // until last tile in b // copy results to global memory #pragma unroll for (i = 0; i < mult_tm; ++i, acur += a_c) { acur[0] = aval_v1[i]; acur[mult_tp] = aval_v2[i]; } } // matrix multiplication plus component-wise function apply __global__ void mult_phi_kern(float *a, unsigned a_c, float *b, unsigned b_c, unsigned b_csub, float *c, unsigned c_c) { // loop counter unsigned i, j; // starting positions in respective tiles float *acur, *bcur, *ccur; { const unsigned block_pos_r = blockIdx.y*mult_tm; const unsigned block_pos_c = blockIdx.x*2*mult_tp; acur = a + block_pos_c + block_pos_r * a_c + threadIdx.x + threadIdx.y * mult_tn; bcur = b + (block_pos_r + threadIdx.y) * b_c + threadIdx.x; ccur = c + block_pos_c + threadIdx.x + threadIdx.y * mult_tn; } // end of last tile in b const float *bend = bcur + b_csub; // current a values // two way virtualization float aval_v1[mult_tm]; float aval_v2[mult_tm]; // initialize a values to zero #pragma unroll for (i = 0; i < mult_tm; ++i) { aval_v1[i] = 0.0f; aval_v2[i] = 0.0f; } // for each tile read from b do { // allocate shared space for tile in b __shared__ float bs[mult_tn][mult_tm+1]; // put tile from b into shared memory #pragma unroll for (i = 0; i < mult_tm; i += (mult_tp/mult_tn)) bs[threadIdx.x][threadIdx.y+i] = bcur[i*b_c]; // move b's tile across bcur += mult_tn; // synchronize to ensure bll elements are read __syncthreads(); // for each row in tile of c #pragma unroll for (i = 0; i < mult_tn; ++i) { // do mults and adds #pragma unroll for (j = 0; j < mult_tm; ++j) { aval_v1[j] += bs[i][j] * ccur[0]; aval_v2[j] += bs[i][j] * ccur[mult_tp]; } ccur += c_c; } __syncthreads(); } while (bcur < bend); // until last tile in b // copy results to global memory #pragma unroll for (i = 0; i < mult_tm; ++i, acur += a_c) { acur[0] = phi(aval_v1[i]); acur[mult_tp] = phi(aval_v2[i]); } } // transpose kernel // expects padded to tile size __global__ void trans_kern(float *a, float *b, unsigned nrow, unsigned astride, unsigned bstride) { unsigned i, blockIdx_x, blockIdx_y; // diagonal reordering to prevent partition camping // borrowed from NVIDIA CUDA SDK, Thanks! 
if (nrow == astride) { blockIdx_y = blockIdx.x; blockIdx_x = (blockIdx.x+blockIdx.y)%gridDim.x; } else { const unsigned bid = blockIdx.x + gridDim.x*blockIdx.y; blockIdx_y = bid%gridDim.y; blockIdx_x = ((bid/gridDim.y)+blockIdx_y)%gridDim.x; } // rows handled per block after striping const unsigned tile_r_stripe = trans_tile_r * trans_stripe; const unsigned tid_y_stripe = threadIdx.y * trans_stripe; // starting row and col in a const unsigned block_row = blockIdx_y * tile_r_stripe; const unsigned block_col = blockIdx_x * trans_tile_c; // thread's row and col in b unsigned row = block_col + tid_y_stripe; unsigned col = block_row + threadIdx.x; // perform tile transpose in shared memory __shared__ float tile[trans_tile_c][tile_r_stripe+1]; unsigned base = row*bstride + col; // read transposed values in from b #pragma unroll for (i = 0; i < trans_stripe; ++i) tile[threadIdx.x][tid_y_stripe+i] = b[base+i*bstride]; // wait for all threads to finish reading into shared mem __syncthreads(); // thread's row and col in a row = block_row + tid_y_stripe; col = block_col + threadIdx.x; base = row*astride + col; // write tile into a #pragma unroll for (i = 0; i < trans_stripe; ++i) a[base+i*astride] = tile[tid_y_stripe+i][threadIdx.x]; } // square kernel for small matrices __global__ void sqr_small_kern(float *a, float *b, unsigned n) { // unique id for each thread 0, ..., (nthreads-1) const unsigned id = threadIdx.x + blockIdx.x * blockDim.x; // if inside matrix if (id < n) { // square single value float bval = b[id]; a[id] = bval*bval; } } // square kernel for large matrices __global__ void sqr_big_kern(float *a, float *b, unsigned n, unsigned stride) { unsigned i; // unique id for each block, strided according to stripe size const unsigned block_index = blockIdx.x + blockIdx.y * gridDim.x * add_big_stripe; // unique id for each thread const unsigned id = threadIdx.x + block_index * blockDim.x; // each thread operates down a column stripe times #pragma unroll for (i = id; i < id+add_big_stripe*stride; i += stride) if (i < n) { float bval = b[i]; a[i] = bval*bval; } } // scalar multiply kernel for small matrices __global__ void scl_small_kern(float *a, float b, float *c, unsigned n) { // unique id for each thread 0, ..., (nthreads-1) const unsigned id = threadIdx.x + blockIdx.x * blockDim.x; // if inside matrix if (id < n) a[id] = b*c[id]; } // scalar multiply kernel for big matrices __global__ void scl_big_kern(float *a, float b, float *c, unsigned n, unsigned stride) { unsigned i; // unique id for each block, strided according to stripe size const unsigned block_index = blockIdx.x + blockIdx.y * gridDim.x * add_big_stripe; // unique id for each thread const unsigned id = threadIdx.x + block_index * blockDim.x; // each thread operates down a column stripe times #pragma unroll for (i = id; i < id+add_big_stripe*stride; i += stride) if (i < n) a[i] = b*c[i]; } // scalar multiply & pointwise addition kernel for small matrices __global__ void scl_add_small_kern(float *a, float b, float *c, float *d, unsigned n) { // unique id for each thread 0, ..., (nthreads-1) const unsigned id = threadIdx.x + blockIdx.x * blockDim.x; // if inside matrix if (id < n) a[id] = b*c[id]+d[id]; } // scalar multiply & pointwise addition kernel for big matrices __global__ void scl_add_big_kern(float *a, float b, float *c, float *d, unsigned n, unsigned stride) { unsigned i; // unique id for each block, strided according to stripe size const unsigned block_index = blockIdx.x + blockIdx.y * gridDim.x * add_big_stripe; // unique id for each thread const unsigned id
= threadIdx.x + block_index * blockDim.x; // each thread operates down a column stripe times #pragma unroll for (i = id; i < id+add_big_stripe*stride; i += stride) if (i < n) a[i] = b*c[i]+d[i]; } // pointwise multiply kernel for small matrices __global__ void pmult_small_kern(float *a, float *b, float *c, unsigned n) { // unique id for each thread 0, ..., (nthreads-1) const unsigned id = threadIdx.x + blockIdx.x * blockDim.x; // if inside matrix if (id < n) // mult one value a[id] = b[id] * c[id]; } // pointwise multiply kernel for big matrices __global__ void pmult_big_kern(float *a, float *b, float *c, unsigned n, unsigned stride) { unsigned i; // unique id for each block, strided according to stripe size const unsigned block_index = blockIdx.x + blockIdx.y * gridDim.x * add_big_stripe; // unique id for each thread const unsigned id = threadIdx.x + block_index * blockDim.x; // each thread mults down a column stripe times #pragma unroll for (i = id; i < id+add_big_stripe*stride; i += stride) if (i < n) // if inside matrix a[i] = b[i] * c[i]; // mult value } // pointwise phi_prime kernel for small matrices __global__ void phi_prime_small_kern(float *a, float *b, unsigned n) { // unique id for each thread 0, ..., (nthreads-1) const unsigned id = threadIdx.x + blockIdx.x * blockDim.x; // if inside matrix if (id < n) // apply phi_prime to one value a[id] = phi_prime(b[id]); } // pointwise phi_prime kernel for big matrices __global__ void phi_prime_big_kern(float *a, float *b, unsigned n, unsigned stride) { unsigned i; // unique id for each block, strided according to stripe size const unsigned block_index = blockIdx.x + blockIdx.y * gridDim.x * add_big_stripe; // unique id for each thread const unsigned id = threadIdx.x + block_index * blockDim.x; // each thread applies down a column stripe times #pragma unroll for (i = id; i < id+add_big_stripe*stride; i += stride) if (i < n) // if inside matrix a[i] = phi_prime(b[i]); } // output delta kernel for small matrices, a = (b - c) * denom __global__ void delta_small_kern(float *a, float *b, float *c, float denom, unsigned n) { // unique id for each thread 0, ..., (nthreads-1) const unsigned id = threadIdx.x + blockIdx.x * blockDim.x; // if inside matrix if (id < n) a[id] = (b[id] - c[id]) * denom; } // output delta kernel for big matrices, a = (b - c) * denom __global__ void delta_big_kern(float *a, float *b, float *c, float denom, unsigned n, unsigned stride) { unsigned i; // unique id for each block, strided according to stripe size const unsigned block_index = blockIdx.x + blockIdx.y * gridDim.x * add_big_stripe; // unique id for each thread const unsigned id = threadIdx.x + block_index * blockDim.x; // each thread operates down a column stripe times #pragma unroll for (i = id; i < id+add_big_stripe*stride; i += stride) if (i < n) // if inside matrix a[i] = (b[i] - c[i]) * denom; } // remove last row kernel // m should point to beginning of last row __global__ void r0_kern(float *m, unsigned ncol) { // unique id for each thread 0, ..., (nthreads-1) const unsigned id = threadIdx.x + blockIdx.x * blockDim.x; // if not past last col if (id < ncol) // set a single value to zero m[id] = 0.0f; } // add row of ones kernel // m should point to first non-existent row __global__ void r1_kern(float *m, unsigned ncol) { // unique id for each thread 0, ..., (nthreads-1) const unsigned id = threadIdx.x + blockIdx.x * blockDim.x; // if not past last col if (id < ncol) // set a single value to one m[id] = 1.0f; } // remove and save last col kernel, a should point to the top of the col, saved values go into b __global__ void c0v_kern(float *a, float *b, unsigned nrow, unsigned stride) { const unsigned id = threadIdx.x + blockIdx.x * blockDim.x; if (id < nrow) { float *aval = a + stride * id; b[id] = *aval; *aval = 0.0f; } } // restore saved col kernel, writes values from b back down the col starting at a __global__ void cv_kern(float *a, float *b, unsigned nrow, unsigned stride) { const unsigned
id = threadIdx.x + blockIdx.x * blockDim.x; if (id < nrow) a[stride * id] = b[id]; } // zero out all values in m for small matrices __global__ void zero_small_kern(float *m, unsigned n) { // unique id for each thread 0, ..., (nthreads-1) const unsigned id = threadIdx.x + blockIdx.x * blockDim.x; // if inside matrix if (id < n) // write zero m[id] = 0.0f; } // zero out all values in m for big matrices __global__ void zero_big_kern(float *m, unsigned n, unsigned stride) { unsigned i; // unique id for each block, strided according to stripe size const unsigned block_index = blockIdx.x + blockIdx.y * gridDim.x * add_big_stripe; // unique id for each thread const unsigned id = threadIdx.x + block_index * blockDim.x; // each thread operates down a column stripe times #pragma unroll for (i = id; i < id+add_big_stripe*stride; i += stride) if (i < n) // write zero m[i] = 0.0f; } /* * Internal function prototypes */ // zero out all values on cpu, does not set sync state! void zero_cpu(matrix m); // zero out all values on gpu, does not set sync state! void zero_gpu(matrix m); /* * Internal function bodies */ // zero out all values on cpu, does not set sync state! void zero_cpu(matrix m) { memset(m.cpu_data, '\0', (m.rstride)*(m.cstride)*sizeof(float)); } // zero out all values on gpu, does not set sync state! void zero_gpu(matrix m) { const unsigned row = m.rstride; const unsigned col = m.cstride; // if matrix dimensions are smaller than limits if ((row < add_rlim) || (col < add_clim)) { // set threads per block as parameterized const dim3 block_size(add_small_tpb, 1, 1); // treat like vector, num blocks is n / tpb const dim3 grid_size((add_small_tpb + row*col - 1) / add_small_tpb, 1, 1); // call small zero kernel zero_small_kern<<<grid_size, block_size>>>(m.gpu_data, row*col); errcheck_gpu(); } else { // set threads per block as parameterized const dim3 block_size(add_big_tpb, 1, 1); // across rows we have ncol / tpb blocks const unsigned grid_size_x = (add_big_tpb + col - 1) / add_big_tpb; // down cols we have nrow / stripe blocks const unsigned grid_size_y = (add_big_stripe + row - 1) / add_big_stripe; const dim3 grid_size(grid_size_x, grid_size_y, 1); // call big zero kernel zero_big_kern<<<grid_size, block_size>>>(m.gpu_data, row*col, col); errcheck_gpu(); } } /* * External function bodies */ //** Initialization & Destruction // initialize matrix m of size r by c extern "C" void matrix_init(matrix *m, unsigned r, unsigned c) { // set matrix dimensions m->r = r; m->c = c; // leave extra room for zero padding to make multiple of tile sizes // always leave an extra row & col for bias terms m->rstride = r + (80 - (r % 80)); // lcm(mult_tn,mult_tm) m->cstride = c + (128 - (c % 128)); // lcm(mult_tn,2*mult_tp) /* m->rstride = r + (640 - (r % 640)); m->cstride = c + (640 - (c % 640)); */ /* uncomment to debug without extra row & col of padding if ((r % 80) == 0) m->rstride = r; else m->rstride = r + (80 - (r % 80)); if ((c % 128) == 0) m->cstride = c; else m->cstride = c + (128 - (c % 128)); */ /* uncomment to debug with small tiles 4,4,4 m->rstride = r + (8 - (r % 8)); m->cstride = c + (8 - (c % 8)); */ if (DEBUG > 1) { printf("matrix size: %d %d\n", m->r, m->c); printf("with padding: %d %d\n", m->rstride, m->cstride); } // allocate space for sync state // done dynamically so we can pass copies of matrices // and utilize them without screwing up state m->sync = (matrix_sync_state*)malloc(sizeof(matrix_sync_state)); if (m->sync == NULL) errcheck_cpu(); // set initial sync to cpu *(m->sync) = 
matrix_sync_cpu; // allocate space for matrix on gpu cudaMalloc((void**)&(m->gpu_data), (m->rstride)*(m->cstride)*sizeof(float)); errcheck_gpu(); // allocate space for last column holder cudaMalloc((void**)&(m->cv), (m->r)*sizeof(float)); errcheck_gpu(); // allocate space for matrix on cpu m->cpu_data = (float*)malloc((m->rstride)*(m->cstride)*sizeof(float)); if (m->cpu_data == NULL) errcheck_cpu(); // zero out all data for safe padding zero_cpu(*m); zero_gpu(*m); } // initialize matrix m from ascii table void matrix_init_file(matrix *m, char *file_name) { // will need to do this dynamically if // we ever want to handle big tables!!! float table_data[line_buff_size][line_buff_size]; unsigned r = 0, c = 0; // open file & setup input buffer FILE *table = fopen(file_name, "r"); char line_buffer[line_buff_size]; // check if we were even able to open table file if (file_name == NULL) errcheck_cpu(); // buckle-up, don't drink and code memset(*table_data, '\0', line_buff_size*line_buff_size*sizeof(float)); // for each line in table - row while (fgets(line_buffer, line_buff_size, table)) { if (DEBUG > 5) printf("line buffer: %s\n", line_buffer); // set up string tokenizer on input buffer char *cur_val = strtok(line_buffer, " "); // Note reentrant or thread safe!! // don't increment num rows on blank line if (cur_val != NULL) { c = 0; // new row, reset col counter // for each token - col while (cur_val != NULL) { // convert from char to float table_data[r][c] = atof(cur_val); if (DEBUG > 5) printf("converting %d %d %f\n", r, c, table_data[r][c]); // get next token cur_val = strtok(NULL, " "); ++c; // increment num cols } ++r; // increment num rows } } // close file descriptor fclose(table); // initialize m matrix_init(m, r, c); // setup for data_at unsigned mr, mc; float *data = m->cpu_data; const unsigned stride = m->cstride; // loop through collected data and put into m for (mr = 0; mr < r; ++mr) for (mc = 0; mc < c; ++mc) data_at(mr,mc) = table_data[mr][mc]; } // load zeros into matrix m extern "C" void matrix_load_zero(matrix m) { float *data = m.cpu_data; const float *data_end = data + (m.rstride*m.cstride); // trashing, set sync set to cpu *(m.sync) = matrix_sync_cpu; // loop through all values and set to zero // use memset? while (data < data_end) *(data++) = 0.0f; } // load values from an array extern "C" void matrix_load_array(matrix m, float *v) { unsigned r, c, i = 0; // set data and stride for data_at float *data = m.cpu_data; const unsigned stride = m.cstride; *(m.sync) = matrix_sync_cpu; matrix_load_zero(m); for (r = 0; r < m.r; ++r) for (c = 0; c < m.c; ++c) data_at(r,c) = v[i++]; } // load values from the random uniform distribution extern "C" void matrix_load_runif(matrix m, float min, float max) { unsigned r, c; // set data and stride for data_at float *data = m.cpu_data; const unsigned stride = m.cstride; const float range = max - min; *(m.sync) = matrix_sync_cpu; matrix_load_zero(m); // needs work to be multithreaded! // should prolly use GPU?! 
// set each non-padding value from random uniform for (r = 0; r < m.r; ++r) for (c = 0; c < m.c; ++c) data_at(r,c) = rand_r(&rand_state) * range / RAND_MAX + min; } extern "C" void matrix_load_testa(matrix m, unsigned n) { unsigned r, c; // set data and stride for data_at float *data = m.cpu_data; const unsigned stride = m.cstride; *(m.sync) = matrix_sync_cpu; matrix_load_zero(m); #pragma omp parallel for private(c) for (r = 0; r < m.r; ++r) for (c = 0; c < m.c; ++c) //data_at(r,c) = (float)n * (float)r*c; data_at(r,c) = (float)n; } extern "C" void matrix_load_testb(matrix m) { unsigned r, c; // set data and stride for data_at float *data = m.cpu_data; const unsigned stride = m.cstride; *(m.sync) = matrix_sync_cpu; matrix_load_zero(m); #pragma omp parallel for private(c) for (r = 0; r < m.r; ++r) for (c = 0; c < m.c; ++c) data_at(r,c) = (float)r; //data_at(r,c) = 1.0f; } extern "C" void matrix_load_testc(matrix m) { unsigned r, c; // set data and stride for data_at float *data = m.cpu_data; const unsigned stride = m.cstride; *(m.sync) = matrix_sync_cpu; matrix_load_zero(m); #pragma omp parallel for private(c) for (r = 0; r < m.r; ++r) for (c = 0; c < m.c; ++c) //data_at(r,c) = (float)c; data_at(r,c) = 1.0f; } // copy values to an array extern "C" void matrix_unload_array(matrix m, float *v) { unsigned r, c, i = 0; float *data = m.cpu_data; const unsigned stride = m.cstride; matrix_sync_to_cpu(m); for (r = 0; r < m.r; ++r) for (c = 0; c < m.c; ++c) v[i++] = data_at(r,c); } // write values to a file extern "C" void matrix_unload_file(matrix m, char *file_name) { unsigned r, c; float *data = m.cpu_data; const unsigned stride = m.cstride; // open file for writing FILE *table = fopen(file_name, "w"); // check if we were even able to open table file if (file_name == NULL) errcheck_cpu(); matrix_sync_to_cpu(m); for (r = 0; r < m.r; ++r) { for (c = 0; c < m.c-1; ++c) fprintf(table, "%.16f ", data_at(r,c)); fprintf(table, "%.16f\n", data_at(r,c)); } fclose(table); } // destroy matrix m extern "C" void matrix_dest(matrix *m) { free(m->cpu_data); // free matrix on cpu free(m->sync); // free sync state cudaFree(m->gpu_data); // free matrix on gpu cudaFree(m->cv); // free col holder // set everything to zero for safety m->r = 0; m->c = 0; m->rstride = 0; m->cstride = 0; m->sync = NULL; m->cpu_data = NULL; m->gpu_data = NULL; m->cv = NULL; } //** cpu/gpu synchronization // ensure current copy of matrix is on cpu void matrix_sync_to_cpu(matrix m) { // if not already on cpu if (*(m.sync) != matrix_sync_cpu) { // copy from device memory to host memory cudaMemcpy(m.cpu_data, m.gpu_data, sizeof(float)*(m.rstride)*(m.cstride), cudaMemcpyDeviceToHost); errcheck_gpu(); // set sync state to cpu *(m.sync) = matrix_sync_cpu; } } // ensure current copy of matrix is on gpu void matrix_sync_to_gpu(matrix m) { // if not already on gpu if (*(m.sync) != matrix_sync_gpu) { // copy from host memory to device memory cudaMemcpy(m.gpu_data, m.cpu_data, sizeof(float)*(m.rstride)*(m.cstride), cudaMemcpyHostToDevice); errcheck_gpu(); // set sync state to gpu *(m.sync) = matrix_sync_gpu; } } // wait for any gpu kernels to finish void matrix_wait() { cudaDeviceSynchronize(); errcheck_gpu(); } //** Access // return pointer to cpu value at row r and col c float *matrix_at(matrix m, unsigned r, unsigned c) { // check not out of bounds! 
matrix_sync_to_cpu(m); return (m.cpu_data)+(r*m.cstride+c); } //** Addition/Subtraction // a = b + c extern "C" void matrix_add(matrix a, matrix b, matrix c) { // sync matrices to gpu *(a.sync) = matrix_sync_gpu; // trashing, just set matrix_sync_to_gpu(b); matrix_sync_to_gpu(c); #ifndef NO_ERRCHECK if ( (a.r != b.r) || (b.r != c.r) ) { fprintf(stderr, __FILE__ " %d: " "Rows don't match for addition!\n", __LINE__); exit(CPU_ERROR); } if ( (a.c != b.c) || (b.c != c.c) ) { fprintf(stderr, __FILE__ " %d: " "Cols don't match for addition!\n", __LINE__); exit(CPU_ERROR); } #endif const unsigned row = b.r; const unsigned col = b.cstride; // if matrix dimensions are smaller than limits if ((row < add_rlim) || (col < add_clim)) { // set threads per block as parameterized const dim3 block_size(add_small_tpb, 1, 1); // treat like vector, num blocks is n / tpb const dim3 grid_size((add_small_tpb + row*col - 1) / add_small_tpb, 1, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } add_small_kern<<<grid_size, block_size>>>(a.gpu_data, b.gpu_data, c.gpu_data, row*col); errcheck_gpu(); } else { // set threads per block as parameterized const dim3 block_size(add_big_tpb, 1, 1); // across rows we have ncol / tpb blocks const unsigned grid_size_x = (add_big_tpb + col - 1) / add_big_tpb; // down cols we have nrow / stripe blocks const unsigned grid_size_y = (add_big_stripe + row - 1) / add_big_stripe; const dim3 grid_size(grid_size_x, grid_size_y, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } add_big_kern<<<grid_size, block_size>>>(a.gpu_data, b.gpu_data, c.gpu_data, row*col, col); errcheck_gpu(); } } // a = b - c extern "C" void matrix_sub(matrix a, matrix b, matrix c) { // sync matrices to gpu *(a.sync) = matrix_sync_gpu; // trashing, just set matrix_sync_to_gpu(b); matrix_sync_to_gpu(c); #ifndef NO_ERRCHECK if ( (a.r != b.r) || (b.r != c.r) ) { fprintf(stderr, __FILE__ " %d: " "Rows don't match for subtraction!\n", __LINE__); exit(CPU_ERROR); } if ( (a.c != b.c) || (b.c != c.c) ) { fprintf(stderr, __FILE__ " %d: " "Cols don't match for subtraction!\n", __LINE__); exit(CPU_ERROR); } #endif const unsigned row = b.r; const unsigned col = b.cstride; // if matrix dimensions are smaller than limits if ((row < add_rlim) || (col < add_clim)) { // set threads per block as parameterized const dim3 block_size(add_small_tpb, 1, 1); // treat like vector, num blocks is n / tpb const dim3 grid_size((add_small_tpb + row*col - 1) / add_small_tpb, 1, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } sub_small_kern<<<grid_size, block_size>>>(a.gpu_data, b.gpu_data, c.gpu_data, row*col); } else { // set threads per block as parameterized const dim3 block_size(add_big_tpb, 1, 1); // across rows we have ncol / tpb blocks const unsigned grid_size_x = (add_big_tpb + col - 1) / add_big_tpb; // down cols we have nrow / stripe blocks const unsigned grid_size_y = (add_big_stripe + row - 1) / add_big_stripe; const dim3 grid_size(grid_size_x, grid_size_y, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } sub_big_kern<<<grid_size, block_size>>>(a.gpu_data, b.gpu_data, c.gpu_data, row*col, col); errcheck_gpu(); } } //** Multiplication // a = b * c extern "C" void matrix_mult(matrix a, 
matrix b, matrix c) { *(a.sync) = matrix_sync_gpu; // trashing, just set matrix_sync_to_gpu(b); matrix_sync_to_gpu(c); #ifndef NO_ERRCHECK if ( (b.c != c.r) || (a.r != b.r) || (a.c != c.c) ) { fprintf(stderr, __FILE__ " %d: " "Dimensions don't match for multiplication!\n", __LINE__); exit(CPU_ERROR); } #endif const dim3 block_size(mult_tn, mult_tp/mult_tn, 1); // remember, x is col and y is row here const unsigned grid_size_r = (mult_tm + a.r - 1) / mult_tm; const unsigned grid_size_c = (2*mult_tp + a.c - 1) / (2*mult_tp); //const unsigned grid_size_r = a.rstride / mult_tm; //const unsigned grid_size_c = a.cstride / (2*mult_tp); const dim3 grid_size(grid_size_c, grid_size_r, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } mult_kern<<<grid_size, block_size>>>(a.gpu_data, a.cstride, b.gpu_data, b.cstride, b.c, c.gpu_data, c.cstride); errcheck_gpu(); } // a = phi(b * c) extern "C" void matrix_mult_phi(matrix a, matrix b, matrix c) { *(a.sync) = matrix_sync_gpu; // trashing, just set matrix_sync_to_gpu(b); matrix_sync_to_gpu(c); #ifndef NO_ERRCHECK if ( (b.c != c.r) || (a.r != b.r) || (a.c != c.c) ) { fprintf(stderr, __FILE__ " %d: " "Dimensions don't match for mult-apply!\n", __LINE__); exit(CPU_ERROR); } #endif const dim3 block_size(mult_tn, mult_tp/mult_tn, 1); // remember, x is col and y is row here const unsigned grid_size_r = (mult_tm + a.r - 1) / mult_tm; const unsigned grid_size_c = (2*mult_tp + a.c - 1) / (2*mult_tp); //const unsigned grid_size_r = a.rstride / mult_tm; //const unsigned grid_size_c = a.cstride / (2*mult_tp); const dim3 grid_size(grid_size_c, grid_size_r, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } mult_phi_kern<<<grid_size, block_size>>>(a.gpu_data, a.cstride, b.gpu_data, b.cstride, b.c, c.gpu_data, c.cstride); errcheck_gpu(); } //** Transpose // a = b^T // a = b^T extern "C" void matrix_trans(matrix a, matrix b) { *(a.sync) = matrix_sync_gpu; // trashing, just set matrix_sync_to_gpu(b); #ifndef NO_ERRCHECK if ( (a.r != b.c) || (a.c != b.r) ) { fprintf(stderr, __FILE__ " %d: " "Source and destination dimensions don't match for transpose!\n", __LINE__); exit(CPU_ERROR); } #endif // divide into grid of trans_tile sized block const dim3 block_size(trans_tile_c, trans_tile_r, 1); const unsigned grid_size_x = (trans_tile_c + a.c - 1) / trans_tile_c; const unsigned grid_size_y = ((trans_tile_r * trans_stripe) + a.r - 1) / (trans_tile_r * trans_stripe); const dim3 grid_size(grid_size_x, grid_size_y, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } // call transpose kernel trans_kern<<<grid_size, block_size>>>(a.gpu_data, b.gpu_data, a.r, a.cstride, b.cstride); errcheck_gpu(); } //** Pointwise miscellaneous // a = b^2 extern "C" void matrix_sqr(matrix a, matrix b) { // sync matrices to gpu *(a.sync) = matrix_sync_gpu; // trashing, just set matrix_sync_to_gpu(b); #ifndef NO_ERRCHECK if ( (a.r != b.r) || (a.c != b.c) ) { fprintf(stderr, __FILE__ " %d: " "Source and destination dimensions don't match for square!\n", __LINE__); exit(CPU_ERROR); } #endif const unsigned row = b.r; const unsigned col = b.cstride; // if matrix dimensions are smaller than limits if ((row < add_rlim) || (col < add_clim)) { // set threads per block as parameterized const dim3 block_size(add_small_tpb, 1, 1); // treat like vector, num 
blocks is n / tpb const dim3 grid_size((add_small_tpb + row*col - 1) / add_small_tpb, 1, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } sqr_small_kern<<<grid_size, block_size>>>(a.gpu_data, b.gpu_data, row*col); errcheck_gpu(); } else { // set threads per block as parameterized const dim3 block_size(add_big_tpb, 1, 1); // across rows we have ncol / tpb blocks const unsigned grid_size_x = (add_big_tpb + col - 1) / add_big_tpb; // down cols we have nrow / stripe blocks const unsigned grid_size_y = (add_big_stripe + row - 1) / add_big_stripe; const dim3 grid_size(grid_size_x, grid_size_y, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } sqr_big_kern<<<grid_size, block_size>>>(a.gpu_data, b.gpu_data, row*col, col); errcheck_gpu(); } } // scalar multiplication a = b*c extern "C" void matrix_scl(matrix a, float b, matrix c) { *(a.sync) = matrix_sync_gpu; // trashing, just set matrix_sync_to_gpu(c); #ifndef NO_ERRCHECK if ( (a.r != c.r) || (a.c != c.c) ) { fprintf(stderr, __FILE__ " %d: " "Source and destination dimensions don't match for scalar mult!\n", __LINE__); exit(CPU_ERROR); } #endif const unsigned row = c.r; const unsigned col = c.cstride; // if matrix dimensions are smaller than limits if ((row < add_rlim) || (col < add_clim)) { // set threads per block as parameterized const dim3 block_size(add_small_tpb, 1, 1); // treat like vector, num blocks is n / tpb const dim3 grid_size((add_small_tpb + row*col - 1) / add_small_tpb, 1, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } scl_small_kern<<<grid_size, block_size>>>(a.gpu_data, b, c.gpu_data, row*col); errcheck_gpu(); } else { // set threads per block as parameterized const dim3 block_size(add_big_tpb, 1, 1); // across rows we have ncol / tpb blocks const unsigned grid_size_x = (add_big_tpb + col - 1) / add_big_tpb; // down cols we have nrow / stripe blocks const unsigned grid_size_y = (add_big_stripe + row - 1) / add_big_stripe; const dim3 grid_size(grid_size_x, grid_size_y, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } scl_big_kern<<<grid_size, block_size>>>(a.gpu_data, b, c.gpu_data, row*col, col); errcheck_gpu(); } } // scalar multiplication & pointwise addition a = b.*c+d void matrix_scl_add(matrix a, float b, matrix c, matrix d) { *(a.sync) = matrix_sync_gpu; // trashing, just set matrix_sync_to_gpu(c); matrix_sync_to_gpu(d); #ifndef NO_ERRCHECK if ( (a.r != c.r) || (a.c != c.c) ) { fprintf(stderr, __FILE__ " %d: " "Source and destination dimensions don't match for scalar mult!\n", __LINE__); exit(CPU_ERROR); } if ( (a.r != d.r) || (a.c != d.c) ) { fprintf(stderr, __FILE__ " %d: " "Source and destination dimensions don't match for scalar mult!\n", __LINE__); exit(CPU_ERROR); } #endif const unsigned row = c.r; const unsigned col = c.cstride; // if matrix dimensions are smaller than limits if ((row < add_rlim) || (col < add_clim)) { // set threads per block as parameterized const dim3 block_size(add_small_tpb, 1, 1); // treat like vector, num blocks is n / tpb const dim3 grid_size((add_small_tpb + row*col - 1) / add_small_tpb, 1, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } 
scl_add_small_kern<<<grid_size, block_size>>>(a.gpu_data, b, c.gpu_data, d.gpu_data, row*col); errcheck_gpu(); } else { // set threads per block as parameterized const dim3 block_size(add_big_tpb, 1, 1); // across rows we have ncol / tpb blocks const unsigned grid_size_x = (add_big_tpb + col - 1) / add_big_tpb; // down cols we have nrow / stripe blocks const unsigned grid_size_y = (add_big_stripe + row - 1) / add_big_stripe; const dim3 grid_size(grid_size_x, grid_size_y, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } scl_add_big_kern<<<grid_size, block_size>>>(a.gpu_data, b, c.gpu_data, d.gpu_data, row*col, col); errcheck_gpu(); } } // pointwise multiplication a = b.*c extern "C" void matrix_pmult(matrix a, matrix b, matrix c) { *(a.sync) = matrix_sync_gpu; // trashing, just set matrix_sync_to_gpu(b); matrix_sync_to_gpu(c); #ifndef NO_ERRCHECK if ( (a.r != b.r) || (b.r != c.r) ) { fprintf(stderr, __FILE__ " %d: " "Rows don't match for pmult!\n", __LINE__); exit(CPU_ERROR); } if ( (a.c != b.c) || (b.c != c.c) ) { fprintf(stderr, __FILE__ " %d: " "Cols don't match for pmult!\n", __LINE__); exit(CPU_ERROR); } #endif const unsigned row = b.r; const unsigned col = b.cstride; // if matrix dimensions are smaller than limits if ((row < add_rlim) || (col < add_clim)) { // set threads per block as parameterized const dim3 block_size(add_small_tpb, 1, 1); // treat like vector, num blocks is n / tpb const dim3 grid_size((add_small_tpb + row*col - 1) / add_small_tpb, 1, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } pmult_small_kern<<<grid_size, block_size>>>(a.gpu_data, b.gpu_data, c.gpu_data, row*col); errcheck_gpu(); } else { // set threads per block as parameterized const dim3 block_size(add_big_tpb, 1, 1); // across rows we have ncol / tpb blocks const unsigned grid_size_x = (add_big_tpb + col - 1) / add_big_tpb; // down cols we have nrow / stripe blocks const unsigned grid_size_y = (add_big_stripe + row - 1) / add_big_stripe; const dim3 grid_size(grid_size_x, grid_size_y, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } pmult_big_kern<<<grid_size, block_size>>>(a.gpu_data, b.gpu_data, c.gpu_data, row*col, col); errcheck_gpu(); } } // extern "C" void matrix_phi_prime(matrix a, matrix b) { *(a.sync) = matrix_sync_gpu; // trashing, just set matrix_sync_to_gpu(b); #ifndef NO_ERRCHECK if ( (a.r != b.r) || (a.c != b.c) ) { fprintf(stderr, __FILE__ " %d: " "Source and destination dimensions don't match for phi_prime!\n", __LINE__); exit(CPU_ERROR); } #endif const unsigned row = b.r; const unsigned col = b.cstride; // if matrix dimensions are smaller than limits if ((row < add_rlim) || (col < add_clim)) { // set threads per block as parameterized const dim3 block_size(add_small_tpb, 1, 1); // treat like vector, num blocks is n / tpb const dim3 grid_size((add_small_tpb + row*col - 1) / add_small_tpb, 1, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } phi_prime_small_kern<<<grid_size, block_size>>>(a.gpu_data, b.gpu_data, row*col); errcheck_gpu(); } else { // set threads per block as parameterized const dim3 block_size(add_big_tpb, 1, 1); // across rows we have ncol / tpb blocks const unsigned grid_size_x = (add_big_tpb + col - 1) / add_big_tpb; // 
down cols we have nrow / stripe blocks const unsigned grid_size_y = (add_big_stripe + row - 1) / add_big_stripe; const dim3 grid_size(grid_size_x, grid_size_y, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } phi_prime_big_kern<<<grid_size, block_size>>>(a.gpu_data, b.gpu_data, row*col, col); errcheck_gpu(); } } // extern "C" void matrix_delta(matrix delta, matrix y, matrix g) { *(delta.sync) = matrix_sync_gpu; // trashing, just set matrix_sync_to_gpu(y); matrix_sync_to_gpu(g); #ifndef NO_ERRCHECK if ( (delta.r != y.r) || (y.r != g.r) ) { fprintf(stderr, __FILE__ " %d: " "Rows don't match for delta!\n", __LINE__); exit(CPU_ERROR); } if ( (delta.c != y.c) || (y.c != g.c) ) { fprintf(stderr, __FILE__ " %d: " "Cols don't match for delta!\n", __LINE__); exit(CPU_ERROR); } #endif const unsigned row = y.r; const unsigned col = y.cstride; // if matrix dimensions are smaller than limits if ((row < add_rlim) || (col < add_clim)) { // set threads per block as parameterized const dim3 block_size(add_small_tpb, 1, 1); // treat like vector, num blocks is n / tpb const dim3 grid_size((add_small_tpb + row*col - 1) / add_small_tpb, 1, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } delta_small_kern<<<grid_size, block_size>>>(delta.gpu_data, y.gpu_data, g.gpu_data, 2.0f / (float)(g.r*g.c), row*col); errcheck_gpu(); } else { // set threads per block as parameterized const dim3 block_size(add_big_tpb, 1, 1); // across rows we have ncol / tpb blocks const unsigned grid_size_x = (add_big_tpb + col - 1) / add_big_tpb; // down cols we have nrow / stripe blocks const unsigned grid_size_y = (add_big_stripe + row - 1) / add_big_stripe; const dim3 grid_size(grid_size_x, grid_size_y, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } delta_big_kern<<<grid_size, block_size>>>(delta.gpu_data, y.gpu_data, g.gpu_data, 2.0f / (float)(g.r*g.c), row*col, col); errcheck_gpu(); } } //** Combination/Separation // remove last row of m extern "C" void matrix_r0(matrix *m) { matrix_sync_to_gpu(*m); const dim3 block_size(r0_tpb, 1, 1); const dim3 grid_size((r0_tpb + m->c - 1) / r0_tpb, 1, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } // decriment number of rows --(m->r); // pass r0_kern gpu data beginning at last row r0_kern<<<grid_size, block_size>>>((m->gpu_data)+((m->r)*(m->cstride)), m->c); errcheck_gpu(); } // append a row of 1's to m // Note: we currently always leave enough padding so that we // can add one extra row. Can't do this more than once! 
extern "C" void matrix_r1(matrix *m) { matrix_sync_to_gpu(*m); const dim3 block_size(r0_tpb, 1, 1); const dim3 grid_size((r0_tpb + m->c - 1) / r0_tpb, 1, 1); if (DEBUG > 0) { printf("block size: %d %d\n", block_size.x, block_size.y); printf("grid size: %d %d\n", grid_size.x, grid_size.y); } // pass r1_kern gpu data beginning at beginning of row after end r1_kern<<<grid_size, block_size>>>((m->gpu_data)+((m->r)*(m->cstride)), m->c); errcheck_gpu(); // increment number of rows ++(m->r); } // remove and save last col of m extern "C" void matrix_c0v(matrix *m) { matrix_sync_to_gpu(*m); const dim3 block_size(r0_tpb, 1, 1); const dim3 grid_size((r0_tpb + m->r - 1) / r0_tpb, 1, 1); --(m->c); c0v_kern<<<grid_size, block_size>>>((m->gpu_data) + (m->c), m->cv, m->r, m->cstride); errcheck_gpu(); } // restore last col of m extern "C" void matrix_cv(matrix *m) { matrix_sync_to_gpu(*m); const dim3 block_size(r0_tpb, 1, 1); const dim3 grid_size((r0_tpb + m->r - 1) / r0_tpb, 1, 1); cv_kern<<<grid_size, block_size>>>((m->gpu_data) + (m->c), m->cv, m->r, m->cstride); errcheck_gpu(); ++(m->c); } //** Error Measurement // rmse between values of actual and approx extern "C" float matrix_rmse(matrix actual, matrix approx) { // if dims don't match throw error! // matrix_sync_to_cpu(actual); matrix_sync_to_cpu(approx); // unsigned r, c; const unsigned len = actual.r*actual.c; float *d1 = actual.cpu_data; float *d2 = approx.cpu_data; float err = 0.0f; // #pragma omp parallel for shared(err) private(c,d1,d2) for (r = 0; r < actual.r; ++r) for (c = 0; c < actual.c; ++c) { unsigned i = r*actual.cstride+c; err += (d1[i] - d2[i])*(d1[i] - d2[i]); } return sqrt(err/len); } // return maximum relative error between approx and actual extern "C" float matrix_relerr_max(matrix actual, matrix approx) { unsigned i; // if matrices are different sizes // then return -1.0f if ( (approx.r != actual.r) || (approx.c != actual.c) ) return -1.0f; // synchronize both matrices to cpu matrix_sync_to_cpu(approx); matrix_sync_to_cpu(actual); // figure relative error float max_err = 0.0f; for (i = 0; i < (approx.rstride)*(approx.cstride); ++i) { // needs work here !!! float actual_cur = actual.cpu_data[i]; if (fabs(actual_cur) > 1.0e-10f) { float rel_err = fabs(actual_cur - approx.cpu_data[i]) / actual_cur; if (rel_err > max_err) max_err = rel_err; } } return max_err; } //** Output // print m to standard out extern "C" void matrix_print(matrix m) { unsigned r, c; const unsigned stride = m.cstride; float *data = m.cpu_data; // sync matrix to cpu matrix_sync_to_cpu(m); // print each value to stdout for (r = 0; r < m.r; ++r) { for (c = 0; c < m.c; ++c) printf("%f ", data_at(r,c)); printf("\n"); } } // print m including padding to standard out extern "C" void matrix_print_padded(matrix m) { unsigned r, c; // set data and stride for data_at const unsigned stride = m.cstride; float *data = m.cpu_data; // sync matrix to cpu matrix_sync_to_cpu(m); // print each value to stdout for (r = 0; r < m.rstride; ++r) { for (c = 0; c < m.cstride; ++c) printf("%f ", data_at(r,c)); printf("\n"); } }
1baaa6e343fa60ebd3b7898ce2599d834abb7045.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "dot_product.cuh" #include <library/cpp/cuda/wrappers/arch.cuh> namespace NKernel { template <typename T, int BLOCK_SIZE> __global__ void DotProductImpl(const T *x, const T *y, T *partResults, ui64 size) { __shared__ T sdata[BLOCK_SIZE]; ui32 tid = threadIdx.x; ui32 i = blockIdx.x * BLOCK_SIZE * 2 + tid; T valx = i < size ? __ldg(x + i) : 0; T valy = i < size ? __ldg(y + i) : 0; T val2x = i + BLOCK_SIZE < size ? __ldg(x + i + BLOCK_SIZE) : 0; T val2y = i + BLOCK_SIZE < size ? __ldg(y + i + BLOCK_SIZE) : 0; sdata[tid] = valx * valy + val2x * val2y; __syncthreads(); for (ui32 s = BLOCK_SIZE >> 1; s > 0; s >>= 1) { if (tid < s) sdata[tid] += sdata[tid + s]; __syncthreads(); } if (tid == 0) { partResults[blockIdx.x] = sdata[0]; } } template <typename T> void DotProduct(const T *x, const T *y, TDotProductContext<T>& context, TCudaStream stream) { const ui32 blockSize = GetDotProductBlockSize(); DotProductImpl<T, blockSize> << < context.NumBlocks, blockSize, 0, stream >> > (x, y, context.PartResults.Get(), context.Size); } template <typename T, int BLOCK_SIZE> __global__ void WeightedDotProductImpl(const T *x, const T *weights, const T *y, T *partResults, ui64 size) { __shared__ T sdata[BLOCK_SIZE]; ui32 tid = threadIdx.x; ui32 i = blockIdx.x * BLOCK_SIZE * 2 + tid; T valx = i < size ? __ldg(x + i) : 0; T valy = i < size ? __ldg(y + i) : 0; T weight = i < size ? __ldg(weights + i) : 0; T val2x = i + BLOCK_SIZE < size ? __ldg(x + i + BLOCK_SIZE) : 0.; T val2y = i + BLOCK_SIZE < size ? __ldg(y + i + BLOCK_SIZE) : 0; T weight2 = i + BLOCK_SIZE < size ? __ldg(weights + i + BLOCK_SIZE) : 0; sdata[tid] = weight * valx * valy + weight2 * val2x * val2y; __syncthreads(); for (ui32 s = BLOCK_SIZE >> 1; s > 0; s >>= 1) { if (tid < s) sdata[tid] += sdata[tid + s]; __syncthreads(); } if (tid == 0) partResults[blockIdx.x] = sdata[0]; } template <typename T> void WeightedDotProduct(const T *x, const T *weights, const T *y, TDotProductContext<T>& context, TCudaStream stream) { const ui32 blockSize = GetDotProductBlockSize(); WeightedDotProductImpl<T, blockSize> << < context.NumBlocks, blockSize, 0, stream >> > (x, weights, y, context.PartResults.Get(), context.Size); } template void DotProduct<int>(const int *x, const int *y, TDotProductContext<int>& ctx, TCudaStream stream); template void DotProduct<double>(const double *x, const double *y, TDotProductContext<double>& ctx, TCudaStream stream); template void DotProduct<ui32>(const ui32 *x, const ui32 *y, TDotProductContext<ui32>& ctx, TCudaStream stream); template void DotProduct<float>(const float *x, const float *y, TDotProductContext<float>& ctx, TCudaStream stream); template void WeightedDotProduct<float>(const float *x, const float *weight, const float *y, TDotProductContext<float>& ctx, TCudaStream stream); template void WeightedDotProduct<double>(const double *x, const double *weight, const double *y, TDotProductContext<double>& ctx, TCudaStream stream); }
1baaa6e343fa60ebd3b7898ce2599d834abb7045.cu
#include "dot_product.cuh" #include <library/cpp/cuda/wrappers/arch.cuh> namespace NKernel { template <typename T, int BLOCK_SIZE> __global__ void DotProductImpl(const T *x, const T *y, T *partResults, ui64 size) { __shared__ T sdata[BLOCK_SIZE]; ui32 tid = threadIdx.x; ui32 i = blockIdx.x * BLOCK_SIZE * 2 + tid; T valx = i < size ? __ldg(x + i) : 0; T valy = i < size ? __ldg(y + i) : 0; T val2x = i + BLOCK_SIZE < size ? __ldg(x + i + BLOCK_SIZE) : 0; T val2y = i + BLOCK_SIZE < size ? __ldg(y + i + BLOCK_SIZE) : 0; sdata[tid] = valx * valy + val2x * val2y; __syncthreads(); for (ui32 s = BLOCK_SIZE >> 1; s > 0; s >>= 1) { if (tid < s) sdata[tid] += sdata[tid + s]; __syncthreads(); } if (tid == 0) { partResults[blockIdx.x] = sdata[0]; } } template <typename T> void DotProduct(const T *x, const T *y, TDotProductContext<T>& context, TCudaStream stream) { const ui32 blockSize = GetDotProductBlockSize(); DotProductImpl<T, blockSize> << < context.NumBlocks, blockSize, 0, stream >> > (x, y, context.PartResults.Get(), context.Size); } template <typename T, int BLOCK_SIZE> __global__ void WeightedDotProductImpl(const T *x, const T *weights, const T *y, T *partResults, ui64 size) { __shared__ T sdata[BLOCK_SIZE]; ui32 tid = threadIdx.x; ui32 i = blockIdx.x * BLOCK_SIZE * 2 + tid; T valx = i < size ? __ldg(x + i) : 0; T valy = i < size ? __ldg(y + i) : 0; T weight = i < size ? __ldg(weights + i) : 0; T val2x = i + BLOCK_SIZE < size ? __ldg(x + i + BLOCK_SIZE) : 0.; T val2y = i + BLOCK_SIZE < size ? __ldg(y + i + BLOCK_SIZE) : 0; T weight2 = i + BLOCK_SIZE < size ? __ldg(weights + i + BLOCK_SIZE) : 0; sdata[tid] = weight * valx * valy + weight2 * val2x * val2y; __syncthreads(); for (ui32 s = BLOCK_SIZE >> 1; s > 0; s >>= 1) { if (tid < s) sdata[tid] += sdata[tid + s]; __syncthreads(); } if (tid == 0) partResults[blockIdx.x] = sdata[0]; } template <typename T> void WeightedDotProduct(const T *x, const T *weights, const T *y, TDotProductContext<T>& context, TCudaStream stream) { const ui32 blockSize = GetDotProductBlockSize(); WeightedDotProductImpl<T, blockSize> << < context.NumBlocks, blockSize, 0, stream >> > (x, weights, y, context.PartResults.Get(), context.Size); } template void DotProduct<int>(const int *x, const int *y, TDotProductContext<int>& ctx, TCudaStream stream); template void DotProduct<double>(const double *x, const double *y, TDotProductContext<double>& ctx, TCudaStream stream); template void DotProduct<ui32>(const ui32 *x, const ui32 *y, TDotProductContext<ui32>& ctx, TCudaStream stream); template void DotProduct<float>(const float *x, const float *y, TDotProductContext<float>& ctx, TCudaStream stream); template void WeightedDotProduct<float>(const float *x, const float *weight, const float *y, TDotProductContext<float>& ctx, TCudaStream stream); template void WeightedDotProduct<double>(const double *x, const double *weight, const double *y, TDotProductContext<double>& ctx, TCudaStream stream); }
5e3d20c75f9832df0b12f9020ed804ab487aa08b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #pragma once #include <cstdio> #include <vector> #ifdef __NVCC__ #include "hipcub/hipcub.hpp" #endif #ifdef __HIPCC__ #include <hipcub/hipcub.hpp> #endif #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/top_k_op.h" #include "paddle/fluid/platform/float16.h" #include "paddle/phi/kernels/funcs/top_k_function_cuda.h" // set cub base traits in order to handle float16 namespace paddle { namespace operators { #define FIXED_BLOCK_DIM_BASE(dim, ...) \ case (dim): { \ constexpr auto kBlockDim = (dim); \ __VA_ARGS__; \ } break #define FIXED_MAXLENGTH_BASE(MaxLength, ...) \ case (MaxLength): { \ constexpr auto maxLength = (MaxLength); \ __VA_ARGS__; \ } break #define FIXED_BLOCK_DIM(...) \ FIXED_BLOCK_DIM_BASE(1024, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_BASE(512, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_BASE(256, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_BASE(128, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_BASE(64, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_BASE(32, ##__VA_ARGS__) #define FIXED_MAXLENGTH(...) \ FIXED_MAXLENGTH_BASE(1, ##__VA_ARGS__); \ FIXED_MAXLENGTH_BASE(2, ##__VA_ARGS__); \ FIXED_MAXLENGTH_BASE(3, ##__VA_ARGS__); \ FIXED_MAXLENGTH_BASE(4, ##__VA_ARGS__); \ FIXED_MAXLENGTH_BASE(5, ##__VA_ARGS__) template <typename DeviceContext, typename T> class TopkOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE_EQ( platform::is_gpu_place(ctx.GetPlace()), true, platform::errors::InvalidArgument("It must use CUDAPlace.")); auto* input = ctx.Input<phi::DenseTensor>("X"); auto* output = ctx.Output<phi::DenseTensor>("Out"); auto* indices = ctx.Output<phi::DenseTensor>("Indices"); int k = static_cast<int>(ctx.Attr<int>("k")); auto* k_t = ctx.Input<phi::DenseTensor>("K"); if (k_t) { phi::DenseTensor k_host; framework::TensorCopySync(*k_t, platform::CPUPlace(), &k_host); k = k_host.data<int>()[0]; framework::DDim output_dims = output->dims(); output_dims[output_dims.size() - 1] = k; output->Resize(output_dims); indices->Resize(output_dims); } const T* input_data = input->data<T>(); T* output_data = output->mutable_data<T>(ctx.GetPlace()); // FIXME(typhoonzero): data is always converted to type T? framework::DDim inputdims = input->dims(); const int64_t input_height = phi::product(phi::slice_ddim(inputdims, 0, inputdims.size() - 1)); const int64_t input_width = inputdims[inputdims.size() - 1]; const auto& dev_ctx = ctx.cuda_device_context(); if ((input_width <= 1024 || k >= 128 || k == input_width)) { if (phi::funcs::SortTopk<T>( dev_ctx, input, input_width, input_height, k, output, indices)) { // Successed, return. 
return; } else { LOG(INFO) << "TopKOP: Some errors happened when use cub sorting, use " "default topk kernel."; } } int64_t* indices_data = indices->mutable_data<int64_t>(ctx.GetPlace()); if (k > input_width) k = input_width; // NOTE: pass lds and dim same to input width. // NOTE: old matrix implementation of stride is different to eigen. // TODO(typhoonzero): refine this kernel. const int kMaxHeight = 2048; int gridx = input_height < kMaxHeight ? input_height : kMaxHeight; phi::backends::gpu::GpuLaunchConfig config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, input_width); switch (config.thread_per_block.x) { FIXED_BLOCK_DIM(switch (phi::funcs::getMaxLength(k)) { FIXED_MAXLENGTH( hipLaunchKernelGGL(( phi::funcs::KeMatrixTopK<T, maxLength, kBlockDim>) , dim3(gridx), dim3(kBlockDim), 0, dev_ctx.stream(), output_data, k, indices_data, input_data, input_width, input_width, static_cast<int>(k), gridx, input_height)); default: PADDLE_THROW(platform::errors::Fatal( "the input k has error when use getMaxLength function to get the " "maxLength.")); }); default: PADDLE_THROW(platform::errors::Unavailable( "Calculation error occurred in TopK Operator's CUDA Kernel.")); } } }; template <typename DeviceContext, typename T> class TopkOpGradCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { PADDLE_ENFORCE_EQ( platform::is_gpu_place(context.GetPlace()), true, platform::errors::InvalidArgument("It must use CUDAPlace.")); auto* x = context.Input<phi::DenseTensor>("X"); auto* out_grad = context.Input<phi::DenseTensor>(framework::GradVarName("Out")); auto* indices = context.Input<phi::DenseTensor>("Indices"); auto* x_grad = context.Output<phi::DenseTensor>(framework::GradVarName("X")); T* x_grad_data = x_grad->mutable_data<T>(context.GetPlace()); const T* out_grad_data = out_grad->data<T>(); const int64_t* indices_data = indices->data<int64_t>(); size_t k = indices->dims()[indices->dims().size() - 1]; framework::DDim xdims = x->dims(); const size_t row = phi::product(phi::slice_ddim(xdims, 0, xdims.size() - 1)); const size_t col = xdims[xdims.size() - 1]; const auto& dev_ctx = context.cuda_device_context(); const int kMaxHeight = 2048; int gridx = row < kMaxHeight ? row : kMaxHeight; switch (phi::funcs::GetDesiredBlockDim(col)) { FIXED_BLOCK_DIM( hipLaunchKernelGGL(( phi::funcs::AssignGrad<T, 5, kBlockDim>) , dim3(gridx), dim3(kBlockDim), 0, dev_ctx.stream(), x_grad_data, indices_data, out_grad_data, row, col, k)); default: PADDLE_THROW( platform::errors::Unavailable("Error occurs when Assign Grad.")); } } }; #undef FIXED_BLOCK_DIM_BASE #undef FIXED_BLOCK_DIM } // namespace operators } // namespace paddle REGISTER_OP_CUDA_KERNEL( top_k, paddle::operators::TopkOpCUDAKernel<phi::GPUContext, float>, paddle::operators::TopkOpCUDAKernel<phi::GPUContext, double>, paddle::operators::TopkOpCUDAKernel<phi::GPUContext, int>, paddle::operators::TopkOpCUDAKernel<phi::GPUContext, int64_t>, paddle::operators::TopkOpCUDAKernel<phi::GPUContext, paddle::platform::float16>); REGISTER_OP_CUDA_KERNEL( top_k_grad, paddle::operators::TopkOpGradCUDAKernel<phi::GPUContext, float>, paddle::operators::TopkOpGradCUDAKernel<phi::GPUContext, double>, paddle::operators::TopkOpGradCUDAKernel<phi::GPUContext, int>, paddle::operators::TopkOpGradCUDAKernel<phi::GPUContext, int64_t>, paddle::operators::TopkOpGradCUDAKernel<phi::GPUContext, paddle::platform::float16>);
5e3d20c75f9832df0b12f9020ed804ab487aa08b.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #pragma once #include <cstdio> #include <vector> #ifdef __NVCC__ #include "cub/cub.cuh" #endif #ifdef __HIPCC__ #include <hipcub/hipcub.hpp> #endif #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/top_k_op.h" #include "paddle/fluid/platform/float16.h" #include "paddle/phi/kernels/funcs/top_k_function_cuda.h" // set cub base traits in order to handle float16 namespace paddle { namespace operators { #define FIXED_BLOCK_DIM_BASE(dim, ...) \ case (dim): { \ constexpr auto kBlockDim = (dim); \ __VA_ARGS__; \ } break #define FIXED_MAXLENGTH_BASE(MaxLength, ...) \ case (MaxLength): { \ constexpr auto maxLength = (MaxLength); \ __VA_ARGS__; \ } break #define FIXED_BLOCK_DIM(...) \ FIXED_BLOCK_DIM_BASE(1024, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_BASE(512, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_BASE(256, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_BASE(128, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_BASE(64, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_BASE(32, ##__VA_ARGS__) #define FIXED_MAXLENGTH(...) \ FIXED_MAXLENGTH_BASE(1, ##__VA_ARGS__); \ FIXED_MAXLENGTH_BASE(2, ##__VA_ARGS__); \ FIXED_MAXLENGTH_BASE(3, ##__VA_ARGS__); \ FIXED_MAXLENGTH_BASE(4, ##__VA_ARGS__); \ FIXED_MAXLENGTH_BASE(5, ##__VA_ARGS__) template <typename DeviceContext, typename T> class TopkOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE_EQ( platform::is_gpu_place(ctx.GetPlace()), true, platform::errors::InvalidArgument("It must use CUDAPlace.")); auto* input = ctx.Input<phi::DenseTensor>("X"); auto* output = ctx.Output<phi::DenseTensor>("Out"); auto* indices = ctx.Output<phi::DenseTensor>("Indices"); int k = static_cast<int>(ctx.Attr<int>("k")); auto* k_t = ctx.Input<phi::DenseTensor>("K"); if (k_t) { phi::DenseTensor k_host; framework::TensorCopySync(*k_t, platform::CPUPlace(), &k_host); k = k_host.data<int>()[0]; framework::DDim output_dims = output->dims(); output_dims[output_dims.size() - 1] = k; output->Resize(output_dims); indices->Resize(output_dims); } const T* input_data = input->data<T>(); T* output_data = output->mutable_data<T>(ctx.GetPlace()); // FIXME(typhoonzero): data is always converted to type T? framework::DDim inputdims = input->dims(); const int64_t input_height = phi::product(phi::slice_ddim(inputdims, 0, inputdims.size() - 1)); const int64_t input_width = inputdims[inputdims.size() - 1]; const auto& dev_ctx = ctx.cuda_device_context(); if ((input_width <= 1024 || k >= 128 || k == input_width)) { if (phi::funcs::SortTopk<T>( dev_ctx, input, input_width, input_height, k, output, indices)) { // Successed, return. return; } else { LOG(INFO) << "TopKOP: Some errors happened when use cub sorting, use " "default topk kernel."; } } int64_t* indices_data = indices->mutable_data<int64_t>(ctx.GetPlace()); if (k > input_width) k = input_width; // NOTE: pass lds and dim same to input width. // NOTE: old matrix implementation of stride is different to eigen. 
// TODO(typhoonzero): refine this kernel. const int kMaxHeight = 2048; int gridx = input_height < kMaxHeight ? input_height : kMaxHeight; phi::backends::gpu::GpuLaunchConfig config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, input_width); switch (config.thread_per_block.x) { FIXED_BLOCK_DIM(switch (phi::funcs::getMaxLength(k)) { FIXED_MAXLENGTH( phi::funcs::KeMatrixTopK<T, maxLength, kBlockDim> <<<gridx, kBlockDim, 0, dev_ctx.stream()>>>(output_data, k, indices_data, input_data, input_width, input_width, static_cast<int>(k), gridx, input_height)); default: PADDLE_THROW(platform::errors::Fatal( "the input k has error when use getMaxLength function to get the " "maxLength.")); }); default: PADDLE_THROW(platform::errors::Unavailable( "Calculation error occurred in TopK Operator's CUDA Kernel.")); } } }; template <typename DeviceContext, typename T> class TopkOpGradCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { PADDLE_ENFORCE_EQ( platform::is_gpu_place(context.GetPlace()), true, platform::errors::InvalidArgument("It must use CUDAPlace.")); auto* x = context.Input<phi::DenseTensor>("X"); auto* out_grad = context.Input<phi::DenseTensor>(framework::GradVarName("Out")); auto* indices = context.Input<phi::DenseTensor>("Indices"); auto* x_grad = context.Output<phi::DenseTensor>(framework::GradVarName("X")); T* x_grad_data = x_grad->mutable_data<T>(context.GetPlace()); const T* out_grad_data = out_grad->data<T>(); const int64_t* indices_data = indices->data<int64_t>(); size_t k = indices->dims()[indices->dims().size() - 1]; framework::DDim xdims = x->dims(); const size_t row = phi::product(phi::slice_ddim(xdims, 0, xdims.size() - 1)); const size_t col = xdims[xdims.size() - 1]; const auto& dev_ctx = context.cuda_device_context(); const int kMaxHeight = 2048; int gridx = row < kMaxHeight ? row : kMaxHeight; switch (phi::funcs::GetDesiredBlockDim(col)) { FIXED_BLOCK_DIM( phi::funcs::AssignGrad<T, 5, kBlockDim> <<<gridx, kBlockDim, 0, dev_ctx.stream()>>>( x_grad_data, indices_data, out_grad_data, row, col, k)); default: PADDLE_THROW( platform::errors::Unavailable("Error occurs when Assign Grad.")); } } }; #undef FIXED_BLOCK_DIM_BASE #undef FIXED_BLOCK_DIM } // namespace operators } // namespace paddle REGISTER_OP_CUDA_KERNEL( top_k, paddle::operators::TopkOpCUDAKernel<phi::GPUContext, float>, paddle::operators::TopkOpCUDAKernel<phi::GPUContext, double>, paddle::operators::TopkOpCUDAKernel<phi::GPUContext, int>, paddle::operators::TopkOpCUDAKernel<phi::GPUContext, int64_t>, paddle::operators::TopkOpCUDAKernel<phi::GPUContext, paddle::platform::float16>); REGISTER_OP_CUDA_KERNEL( top_k_grad, paddle::operators::TopkOpGradCUDAKernel<phi::GPUContext, float>, paddle::operators::TopkOpGradCUDAKernel<phi::GPUContext, double>, paddle::operators::TopkOpGradCUDAKernel<phi::GPUContext, int>, paddle::operators::TopkOpGradCUDAKernel<phi::GPUContext, int64_t>, paddle::operators::TopkOpGradCUDAKernel<phi::GPUContext, paddle::platform::float16>);
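/*
 * The FIXED_BLOCK_DIM / FIXED_MAXLENGTH macros above exist to turn a runtime
 * block size into a compile-time template parameter, since kBlockDim sizes
 * shared memory and controls unrolling inside the kernels. A stripped-down
 * sketch of that dispatch pattern is shown below; it is illustrative only and
 * not Paddle code.
 */
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

template <int kBlockDim>
__global__ void BlockSum(const float* in, float* out, int n) {
    __shared__ float buf[kBlockDim];   // size must be a compile-time constant
    int tid = threadIdx.x;
    float v = 0.f;
    for (int i = tid; i < n; i += kBlockDim) v += in[i];
    buf[tid] = v;
    __syncthreads();
    for (int s = kBlockDim / 2; s > 0; s /= 2) {
        if (tid < s) buf[tid] += buf[tid + s];
        __syncthreads();
    }
    if (tid == 0) *out = buf[0];
}

#define CASE_BLOCK_DIM(dim) \
    case (dim): BlockSum<(dim)><<<1, (dim)>>>(in, out, n); break

// Pick the template instantiation that matches the runtime block size.
void LaunchBlockSum(const float* in, float* out, int n, int block_dim) {
    switch (block_dim) {
        CASE_BLOCK_DIM(1024);
        CASE_BLOCK_DIM(512);
        CASE_BLOCK_DIM(256);
        CASE_BLOCK_DIM(128);
        default: printf("unsupported block size %d\n", block_dim);
    }
}

int main() {
    const int n = 1000;
    std::vector<float> h(n, 1.f);
    float *in, *out, result = 0.f;
    cudaMalloc(&in, n * sizeof(float));
    cudaMalloc(&out, sizeof(float));
    cudaMemcpy(in, h.data(), n * sizeof(float), cudaMemcpyHostToDevice);
    LaunchBlockSum(in, out, n, 256);   // block size chosen at runtime
    cudaMemcpy(&result, out, sizeof(float), cudaMemcpyDeviceToHost);
    printf("sum = %f (expected %d)\n", result, n);
    cudaFree(in); cudaFree(out);
    return 0;
}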
1cf9de98a7abc757965e2ccbfd19f031130a022e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Reference code implementing the box blur filter. Build and execute as follows: make clean && make ./blur_filter size Author: Naga Kandasamy Date created: May 3, 2019 Date modified: May 12, 2020 FIXME: Student name(s) */ #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> /* #define DEBUG */ /* Include the kernel code */ #include "blur_filter_kernel.cu" extern "C" void compute_gold(const image_t, image_t); void compute_on_device(const image_t, image_t); image_t allocate_matrix_on_device(image_t M); void copy_matrix_to_device(image_t Mdevice, image_t Mhost); void copy_matrix_from_device(image_t Mhost, image_t Mdevice); void free_matrix_on_device(image_t *M); void free_matrix_on_host(image_t *M); void check_CUDA_error(const char *msg); int check_results(const float *, const float *, int, float); void print_image(const image_t); int main(int argc, char **argv) { if (argc < 2) { fprintf(stderr, "Usage: %s size\n", argv[0]); fprintf( stderr, "size: Height of the image. The program assumes size x size image.\n"); exit(EXIT_FAILURE); } /* Allocate memory for the input and output images */ int size = atoi(argv[1]); printf("%d\t", size); fprintf(stderr, "Creating %d x %d images\n", size, size); image_t in, out_gold, out_gpu; in.size = out_gold.size = out_gpu.size = size; in.elements = (float *) malloc(sizeof(float) * size * size); out_gold.elements = (float *) malloc(sizeof(float) * size * size); out_gpu.elements = (float *) malloc(sizeof(float) * size * size); if ((in.elements == NULL) || (out_gold.elements == NULL) || (out_gpu.elements == NULL)) { perror("Malloc"); exit(EXIT_FAILURE); } /* Poplulate our image with random values between [-0.5 +0.5] */ srand(time(NULL)); int i; for (i = 0; i < size * size; i++) in.elements[i] = rand() / (float) RAND_MAX - 0.5; /* Calculate the blur on the CPU. The result is stored in out_gold. */ fprintf(stderr, "Calculating blur on the CPU\n"); struct timeval start, stop; gettimeofday(&start, NULL); compute_gold(in, out_gold); gettimeofday(&stop, NULL); // fprintf(stderr, "Execution time = %fs\n", // (float) (stop.tv_sec - start.tv_sec // + (stop.tv_usec - start.tv_usec) / (float) 1000000)); printf("%.5f\t", (float) (stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec) / (float) 1000000)); #ifdef DEBUG print_image(in); print_image(out_gold); #endif /* FIXME: Calculate the blur on the GPU. The result is stored in out_gpu. 
*/ fprintf(stderr, "Calculating blur on the GPU\n"); compute_on_device(in, out_gpu); // print_image(out_gpu); /* Check CPU and GPU results for correctness */ fprintf(stderr, "Checking CPU and GPU results\n"); int num_elements = out_gold.size * out_gold.size; float eps = 1e-6; /* Do not change */ int check; check = check_results(out_gold.elements, out_gpu.elements, num_elements, eps); if (check == 0) fprintf(stderr, "TEST PASSED\n"); else fprintf(stderr, "TEST FAILED\n"); /* Free data structures on the host */ free((void *) in.elements); free((void *) out_gold.elements); free((void *) out_gpu.elements); exit(EXIT_SUCCESS); } /* FIXME: Complete this function to calculate the blur on the GPU */ void compute_on_device(const image_t in_host, image_t out_host) { /* Allocate memory and copy matrices to device */ image_t in_dev = allocate_matrix_on_device(in_host); image_t out_dev = allocate_matrix_on_device(out_host); copy_matrix_to_device(in_dev, in_host); copy_matrix_to_device(out_dev, out_host); struct timeval start, stop; gettimeofday(&start, NULL); /* Set up the execution grid */ dim3 threads(TILE_SIZE, TILE_SIZE); fprintf(stderr, "Setting up a %d x %d grid of thread blocks\n", (out_dev.size + TILE_SIZE - 1) / TILE_SIZE, (out_dev.size + TILE_SIZE - 1) / TILE_SIZE); dim3 grid((out_dev.size + TILE_SIZE - 1) / TILE_SIZE, (out_dev.size + TILE_SIZE - 1) / TILE_SIZE); /* Launch kernel */ hipLaunchKernelGGL(( blur_filter_kernel), dim3(grid), dim3(threads), 0, 0, in_dev.elements, out_dev.elements, in_host.size); hipDeviceSynchronize(); gettimeofday(&stop, NULL); // fprintf(stderr, "Execution time = %fs\n", (float) (stop.tv_sec - start.tv_sec // + (stop.tv_usec - start.tv_usec) / (float) 1000000)); printf("%.5f\n", (float) (stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec) / (float) 1000000)); check_CUDA_error("Error in kernel"); copy_matrix_from_device(out_host, out_dev); free_matrix_on_device(&in_dev); free_matrix_on_device(&out_dev); } /* Allocate memory on device for matrix */ image_t allocate_matrix_on_device(image_t M) { image_t Mdevice = M; int size = M.size * M.size * sizeof(float); hipMalloc((void **) &Mdevice.elements, size); if (Mdevice.elements == NULL) { fprintf(stderr, "CudaMalloc error\n"); exit(EXIT_FAILURE); } return Mdevice; } /* Copy matrix from host memory to device memory */ void copy_matrix_to_device(image_t Mdevice, image_t Mhost) { int size = Mhost.size * Mhost.size * sizeof(float); hipMemcpy(Mdevice.elements, Mhost.elements, size, hipMemcpyHostToDevice); } /* Copy matrix from device memory to host memory */ void copy_matrix_from_device(image_t Mhost, image_t Mdevice) { int size = Mdevice.size * Mdevice.size * sizeof(float); hipMemcpy(Mhost.elements, Mdevice.elements, size, hipMemcpyDeviceToHost); } /* Free matrix on device */ void free_matrix_on_device(image_t *M) { hipFree(M->elements); M->elements = NULL; } /* Free matrix on host */ void free_matrix_on_host(image_t *M) { free(M->elements); M->elements = NULL; } /* Check for errors during kernel execution */ void check_CUDA_error(const char *msg) { hipError_t err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "CUDA ERROR: %s (%s).\n", msg, hipGetErrorString(err)); exit(EXIT_FAILURE); } } /* Check correctness of results */ int check_results(const float *pix1, const float *pix2, int num_elements, float eps) { int i; for (i = 0; i < num_elements; i++) if (fabsf((pix1[i] - pix2[i]) / pix1[i]) > eps) return -1; return 0; } /* Print out the image contents */ void print_image(const image_t img) { int i, j; 
float val; for (i = 0; i < img.size; i++) { for (j = 0; j < img.size; j++) { val = img.elements[i * img.size + j]; printf("%0.4f ", val); } printf("\n"); } printf("\n"); }
1cf9de98a7abc757965e2ccbfd19f031130a022e.cu
/* Reference code implementing the box blur filter. Build and execute as follows: make clean && make ./blur_filter size Author: Naga Kandasamy Date created: May 3, 2019 Date modified: May 12, 2020 FIXME: Student name(s) */ #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> /* #define DEBUG */ /* Include the kernel code */ #include "blur_filter_kernel.cu" extern "C" void compute_gold(const image_t, image_t); void compute_on_device(const image_t, image_t); image_t allocate_matrix_on_device(image_t M); void copy_matrix_to_device(image_t Mdevice, image_t Mhost); void copy_matrix_from_device(image_t Mhost, image_t Mdevice); void free_matrix_on_device(image_t *M); void free_matrix_on_host(image_t *M); void check_CUDA_error(const char *msg); int check_results(const float *, const float *, int, float); void print_image(const image_t); int main(int argc, char **argv) { if (argc < 2) { fprintf(stderr, "Usage: %s size\n", argv[0]); fprintf( stderr, "size: Height of the image. The program assumes size x size image.\n"); exit(EXIT_FAILURE); } /* Allocate memory for the input and output images */ int size = atoi(argv[1]); printf("%d\t", size); fprintf(stderr, "Creating %d x %d images\n", size, size); image_t in, out_gold, out_gpu; in.size = out_gold.size = out_gpu.size = size; in.elements = (float *) malloc(sizeof(float) * size * size); out_gold.elements = (float *) malloc(sizeof(float) * size * size); out_gpu.elements = (float *) malloc(sizeof(float) * size * size); if ((in.elements == NULL) || (out_gold.elements == NULL) || (out_gpu.elements == NULL)) { perror("Malloc"); exit(EXIT_FAILURE); } /* Poplulate our image with random values between [-0.5 +0.5] */ srand(time(NULL)); int i; for (i = 0; i < size * size; i++) in.elements[i] = rand() / (float) RAND_MAX - 0.5; /* Calculate the blur on the CPU. The result is stored in out_gold. */ fprintf(stderr, "Calculating blur on the CPU\n"); struct timeval start, stop; gettimeofday(&start, NULL); compute_gold(in, out_gold); gettimeofday(&stop, NULL); // fprintf(stderr, "Execution time = %fs\n", // (float) (stop.tv_sec - start.tv_sec // + (stop.tv_usec - start.tv_usec) / (float) 1000000)); printf("%.5f\t", (float) (stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec) / (float) 1000000)); #ifdef DEBUG print_image(in); print_image(out_gold); #endif /* FIXME: Calculate the blur on the GPU. The result is stored in out_gpu. 
*/ fprintf(stderr, "Calculating blur on the GPU\n"); compute_on_device(in, out_gpu); // print_image(out_gpu); /* Check CPU and GPU results for correctness */ fprintf(stderr, "Checking CPU and GPU results\n"); int num_elements = out_gold.size * out_gold.size; float eps = 1e-6; /* Do not change */ int check; check = check_results(out_gold.elements, out_gpu.elements, num_elements, eps); if (check == 0) fprintf(stderr, "TEST PASSED\n"); else fprintf(stderr, "TEST FAILED\n"); /* Free data structures on the host */ free((void *) in.elements); free((void *) out_gold.elements); free((void *) out_gpu.elements); exit(EXIT_SUCCESS); } /* FIXME: Complete this function to calculate the blur on the GPU */ void compute_on_device(const image_t in_host, image_t out_host) { /* Allocate memory and copy matrices to device */ image_t in_dev = allocate_matrix_on_device(in_host); image_t out_dev = allocate_matrix_on_device(out_host); copy_matrix_to_device(in_dev, in_host); copy_matrix_to_device(out_dev, out_host); struct timeval start, stop; gettimeofday(&start, NULL); /* Set up the execution grid */ dim3 threads(TILE_SIZE, TILE_SIZE); fprintf(stderr, "Setting up a %d x %d grid of thread blocks\n", (out_dev.size + TILE_SIZE - 1) / TILE_SIZE, (out_dev.size + TILE_SIZE - 1) / TILE_SIZE); dim3 grid((out_dev.size + TILE_SIZE - 1) / TILE_SIZE, (out_dev.size + TILE_SIZE - 1) / TILE_SIZE); /* Launch kernel */ blur_filter_kernel<<<grid, threads>>>(in_dev.elements, out_dev.elements, in_host.size); cudaDeviceSynchronize(); gettimeofday(&stop, NULL); // fprintf(stderr, "Execution time = %fs\n", (float) (stop.tv_sec - start.tv_sec // + (stop.tv_usec - start.tv_usec) / (float) 1000000)); printf("%.5f\n", (float) (stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec) / (float) 1000000)); check_CUDA_error("Error in kernel"); copy_matrix_from_device(out_host, out_dev); free_matrix_on_device(&in_dev); free_matrix_on_device(&out_dev); } /* Allocate memory on device for matrix */ image_t allocate_matrix_on_device(image_t M) { image_t Mdevice = M; int size = M.size * M.size * sizeof(float); cudaMalloc((void **) &Mdevice.elements, size); if (Mdevice.elements == NULL) { fprintf(stderr, "CudaMalloc error\n"); exit(EXIT_FAILURE); } return Mdevice; } /* Copy matrix from host memory to device memory */ void copy_matrix_to_device(image_t Mdevice, image_t Mhost) { int size = Mhost.size * Mhost.size * sizeof(float); cudaMemcpy(Mdevice.elements, Mhost.elements, size, cudaMemcpyHostToDevice); } /* Copy matrix from device memory to host memory */ void copy_matrix_from_device(image_t Mhost, image_t Mdevice) { int size = Mdevice.size * Mdevice.size * sizeof(float); cudaMemcpy(Mhost.elements, Mdevice.elements, size, cudaMemcpyDeviceToHost); } /* Free matrix on device */ void free_matrix_on_device(image_t *M) { cudaFree(M->elements); M->elements = NULL; } /* Free matrix on host */ void free_matrix_on_host(image_t *M) { free(M->elements); M->elements = NULL; } /* Check for errors during kernel execution */ void check_CUDA_error(const char *msg) { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "CUDA ERROR: %s (%s).\n", msg, cudaGetErrorString(err)); exit(EXIT_FAILURE); } } /* Check correctness of results */ int check_results(const float *pix1, const float *pix2, int num_elements, float eps) { int i; for (i = 0; i < num_elements; i++) if (fabsf((pix1[i] - pix2[i]) / pix1[i]) > eps) return -1; return 0; } /* Print out the image contents */ void print_image(const image_t img) { int i, j; float val; for (i = 0; i < 
img.size; i++) { for (j = 0; j < img.size; j++) { val = img.elements[i * img.size + j]; printf("%0.4f ", val); } printf("\n"); } printf("\n"); }
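Aside, a hedged sketch that is not part of either file in the pair above: check_results() uses a purely relative test and divides by pix1[i], which breaks down whenever a reference pixel is exactly zero. Below is a minimal division-safe variant that mixes an absolute and a relative tolerance; the function name and parameters are illustrative only and do not come from the original assignment.

#include <math.h>

/* Accept an element when it is close in absolute terms OR in relative terms. */
static int check_results_safe(const float *ref, const float *test, int num_elements,
                              float rel_eps, float abs_eps)
{
    int i;
    for (i = 0; i < num_elements; i++) {
        float diff = fabsf(ref[i] - test[i]);
        if (diff > abs_eps && diff > rel_eps * fabsf(ref[i]))
            return -1;  /* mismatch beyond both tolerances */
    }
    return 0;           /* all elements within tolerance */
}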
4214a3e7e2ec3a822a1154cda2494107efd8be9c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include"hip/hip_runtime_api.h" #include<stdio.h> #include<stdlib.h> #include <cusolverDn.h> #include "Utilities.cuh" void evd(float* d_covarianceMatrix, float*h_U, float*h_S, int Nrows, int Ncols) { // cusolver setup parameters int work_size = 0; int *devInfo; gpuErrchk(hipMalloc(&devInfo, sizeof(int))); // cusolver initialization hipsolverDnHandle_t solver_handle; hipsolverDnCreate(&solver_handle); // diagonal matrix (eigenvalues) float *d_S; gpuErrchk(hipMalloc(&d_S, Nrows * sizeof(float))); // query the workspace size needed for the eigenvalue decomposition cusolveSafeCall(hipsolverDnSsyevd_bufferSize(solver_handle, HIPSOLVER_EIG_MODE_VECTOR, HIPBLAS_FILL_MODE_LOWER, Nrows, d_covarianceMatrix, Nrows, d_S, &work_size)); float *work; gpuErrchk(hipMalloc(&work, work_size * sizeof(float))); // run the eigenvalue decomposition cusolveSafeCall(hipsolverDnSsyevd(solver_handle, HIPSOLVER_EIG_MODE_VECTOR, HIPBLAS_FILL_MODE_LOWER, Nrows, d_covarianceMatrix, Nrows, d_S, work, work_size, devInfo)); int devInfo_h = 0; gpuErrchk(hipMemcpy(&devInfo_h, devInfo, sizeof(int), hipMemcpyDeviceToHost)); if (devInfo_h != 0) printf("Unsuccessful SVD execution\n\n"); // copy the results back from the GPU gpuErrchk(hipMemcpy(h_S, d_S, Nrows * sizeof(float), hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpy(h_U, d_covarianceMatrix, Nrows * Nrows * sizeof(float), hipMemcpyDeviceToHost));// the eigenvectors are stored in the original matrix buffer // free resources hipFree(devInfo); hipFree(work); hipFree(d_S); hipsolverDnDestroy(solver_handle); }
4214a3e7e2ec3a822a1154cda2494107efd8be9c.cu
#include "cuda_runtime.h" #include"cuda_runtime_api.h" #include<stdio.h> #include<stdlib.h> #include <cusolverDn.h> #include "Utilities.cuh" void evd(float* d_covarianceMatrix, float*h_U, float*h_S, int Nrows, int Ncols) { // cusolver前期参数 int work_size = 0; int *devInfo; gpuErrchk(cudaMalloc(&devInfo, sizeof(int))); // cusolver初始化 cusolverDnHandle_t solver_handle; cusolverDnCreate(&solver_handle); // 对角矩阵 float *d_S; gpuErrchk(cudaMalloc(&d_S, Nrows * sizeof(float))); // 计算特征值分解所需空间 cusolveSafeCall(cusolverDnSsyevd_bufferSize(solver_handle, CUSOLVER_EIG_MODE_VECTOR, CUBLAS_FILL_MODE_LOWER, Nrows, d_covarianceMatrix, Nrows, d_S, &work_size)); float *work; gpuErrchk(cudaMalloc(&work, work_size * sizeof(float))); // 调用函数特征值分解 cusolveSafeCall(cusolverDnSsyevd(solver_handle, CUSOLVER_EIG_MODE_VECTOR, CUBLAS_FILL_MODE_LOWER, Nrows, d_covarianceMatrix, Nrows, d_S, work, work_size, devInfo)); int devInfo_h = 0; gpuErrchk(cudaMemcpy(&devInfo_h, devInfo, sizeof(int), cudaMemcpyDeviceToHost)); if (devInfo_h != 0) printf("Unsuccessful SVD execution\n\n"); // 从GPU取出数据 gpuErrchk(cudaMemcpy(h_S, d_S, Nrows * sizeof(float), cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpy(h_U, d_covarianceMatrix, Nrows * Nrows * sizeof(float), cudaMemcpyDeviceToHost));//特徵向量儲存在原矩陣空間 //释放空间 cudaFree(devInfo); cudaFree(work); cudaFree(d_S); cusolverDnDestroy(solver_handle); }
79b86e370b5d23215e3af96e3707cfcb2c257140.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include <stdio.h> #include <string.h> #include <ctime> #include <math.h> /* pow, ceil */ #include <algorithm> #include <hip/device_functions.h> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> //Windows has <direct.h>, POSIX systems have <unistd.h> #include <unistd.h> /*To get the path to this script's directory*/ #include <sys/syslimits.h> using namespace std; const int C = 6144; //const int b = 8;//To cause misses. const int b = 0;//For no misses. //const int N = C+b+1; //24580 bytes = 24KB + 4B in binary => first miss const int N = 9*8 + C+(b*5); //const int N = 6144; //24576 bytes = 24KB => no miss => C = 6144 //const int N = 6000; //24000 bytes = 24KB in decimal => no miss const int s = 8;//s = 128*4 bytes //Max iterations with stored array //indeces being ints //const int iterations = 0xc000/8 - 4; //34 because 17*500 = 8500 => traverse array N once. //*2 for traversing twice to eliminate cold miss effect. //The modulus so it will do N+1 when odd and N+0 when even. //Just so the division always ends up being an even # div by 2. const int iterations = ((N+(N%2))/s)*4; //const int iterations = ((N+(N%2))/s); //const int iterations = 6143; //const int iterations = 768*2; //Both threshold and tolerance //dynamically allocated in main() unsigned int threshold = 200; float tolerance = 0.3; //30% tolerance const char *path = getcwd(NULL,0); __global__ void bench_CacheAccess(unsigned int *CUDA_A, unsigned int device_tvalue[], unsigned int device_index[]) { //Placing variables in shared memory makes them //not interfere with the global memory cache and, hence, the experiment __shared__ unsigned int s_tvalue[iterations]; __shared__ unsigned int s_index[iterations]; //__shared__ unsigned int s_tvalue[iterations]; //__shared__ unsigned int s_index[iterations]; //__shared__ int j; int j; j = 0; for (int it = 0; it < iterations; it++) { clock_t start_time = clock(); j = CUDA_A[j]; //Store the element index //Also generates memory dependence on previous //instruction, so that clock() happens after the //array access above s_index[it] = j; clock_t end_time = clock(); //store the access latency s_tvalue[it] = end_time - start_time; } //All threads in this block have to reach this point //before continuing execution. __syncthreads(); //Transfer results from shared memory to global memory //Later we will memcpy() the device global memory to host for (int i = 0; i < iterations; i++) { device_index[i] = s_index[i]; device_tvalue[i] = s_tvalue[i]; } } int main() { printf("Will go through [%d] iterations with array of size N = [%d].\n", iterations, N); FILE * file; unsigned int *A = new unsigned int[N]; //The array of size N to test the cache unsigned int *host_tvalue = new unsigned int[iterations]; //Time values for memory accesses unsigned int *host_index = new unsigned int[iterations]; //Index array of the accesses to the array elements int hits = 0, misses = 0; //Initialize array for (int i = 0; i < N; i++) { A[i] = (i + s) % N; } //Initialize index and time value arrays for (int k = 0; k < iterations; k++) { host_tvalue[k] = 0; host_index[k] = 0; } hipFuncSetCacheConfig(bench_CacheAccess, hipFuncCachePreferL1); hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! 
Do you have a CUDA-capable GPU installed?"); return -1; } unsigned int *CUDA_A = 0; //When we allocate space for A on the GPU we assign it to this ptr, CUDA_A unsigned int *device_tvalue = 0; //Device variables needed to copy back to host. unsigned int *device_index = 0; //Places array into cache cudaStatus = hipMalloc((void**)&CUDA_A, N * sizeof(unsigned int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); hipDeviceReset(); //Clear all allocations and exit } //Places array into cache cudaStatus = hipMalloc((void**)&device_tvalue, iterations * sizeof(unsigned int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed for the tvalues array!"); hipDeviceReset(); //Clear all allocations and exit return -1; } //Places array into cache cudaStatus = hipMalloc((void**)&device_index, iterations * sizeof(unsigned int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed for the index array!"); hipDeviceReset(); //Clear all allocations and exit return -1; } for (int a = 0; a < 10; a++){ // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(CUDA_A, A, N * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed for the array!"); hipDeviceReset(); return -1; } } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(device_index, host_index, iterations * sizeof(unsigned int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed for the index array!"); hipDeviceReset(); return -1; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(device_tvalue, host_tvalue, iterations * sizeof(unsigned int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed for the tvalues array!"); hipDeviceReset(); } // Classic P-chase benchmark. hipLaunchKernelGGL(( bench_CacheAccess), dim3(1),dim3(1), 0, 0, CUDA_A, device_tvalue, device_index); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "kernel launch failed: %s\n", hipGetErrorString(cudaStatus)); return -1; } // cudadevicesynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "cudadevicesynchronize returned error code %d after launching kernel!\n", cudaStatus); return -1; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(host_tvalue, device_tvalue, iterations * sizeof(unsigned int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed! Could not retrieve tvalue from device.\n"); return -1; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(host_index, device_index, iterations * sizeof(unsigned int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed! Could not retrieve index from device.\n"); return -1; } //Dynamically allocate threshold. 
First access is always a cold miss int delta = (int) (tolerance * host_tvalue[0]); //If I don't hardcode threshold, calculate it if (threshold == 0) threshold = host_tvalue[0] - delta; file = fopen("experiment_results.dat","w"); if ((host_tvalue != NULL) && (host_index != NULL)) { for (int a = 0; a < iterations; a++) { if (host_tvalue[a] > threshold) misses++; else hits++; } } printf("%d hits\n%d misses\n", hits, misses); printf("threshold = %d\n", threshold); //printf("b = %d\n",((s*N)/(N-s*hits))); fprintf(file, "hits|misses\n"); fprintf(file, "%d|%d\n", hits, misses); fprintf(file, "threshold=%d\n", threshold); fprintf(file, "arraySize=%d\n", N); fprintf(file, "stride=%d\n", s); fprintf(file, "numIterations=%d\n", iterations); fprintf(file,"arrayIndex|tvalue\n"); for (int b = 0; b < iterations; b++) { //printf("host_index[%d] = %d\n",b,host_index[b]); //printf("host_tvalue[%d] = %d\n", b, host_tvalue[b]); fprintf(file,"%d|%d\n",host_index[b],host_tvalue[b]); } fprintf(file,"end\n"); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } fclose(file); //Call python GUI script to show results printf("path = %s\n",path); //+7 because of "python\s" char cmd[PATH_MAX + 7]; snprintf(cmd,sizeof(cmd),"python %s/../Python_Scripts/GUI.py", path); system(cmd); return 0; }
79b86e370b5d23215e3af96e3707cfcb2c257140.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include <stdio.h> #include <string.h> #include <ctime> #include <math.h> /* pow, ceil */ #include <algorithm> #include <device_functions.h> #include <cuda_runtime_api.h> #include <cuda.h> //Windows has <direct.h>, POSIX systems have <unistd.h> #include <unistd.h> /*To get the path to this script's directory*/ #include <sys/syslimits.h> using namespace std; const int C = 6144; //const int b = 8;//To cause misses. const int b = 0;//For no misses. //const int N = C+b+1; //24580 bytes = 24KB + 4B in binary => first miss const int N = 9*8 + C+(b*5); //const int N = 6144; //24576 bytes = 24KB => no miss => C = 6144 //const int N = 6000; //24000 bytes = 24KB in decimal => no miss const int s = 8;//s = 128*4 bytes //Max iterations with stored array //indeces being ints //const int iterations = 0xc000/8 - 4; //34 because 17*500 = 8500 => traverse array N once. //*2 for traversing twice to eliminate cold miss effect. //The modulus so it will do N+1 when odd and N+0 when even. //Just so the division always ends up being an even # div by 2. const int iterations = ((N+(N%2))/s)*4; //const int iterations = ((N+(N%2))/s); //const int iterations = 6143; //const int iterations = 768*2; //Both threshold and tolerance //dynamically allocated in main() unsigned int threshold = 200; float tolerance = 0.3; //30% tolerance const char *path = getcwd(NULL,0); __global__ void bench_CacheAccess(unsigned int *CUDA_A, unsigned int device_tvalue[], unsigned int device_index[]) { //Placing variables in shared memory makes them //not interfere with the global memory cache and, hence, the experiment __shared__ unsigned int s_tvalue[iterations]; __shared__ unsigned int s_index[iterations]; //__shared__ unsigned int s_tvalue[iterations]; //__shared__ unsigned int s_index[iterations]; //__shared__ int j; int j; j = 0; for (int it = 0; it < iterations; it++) { clock_t start_time = clock(); j = CUDA_A[j]; //Store the element index //Also generates memory dependence on previous //instruction, so that clock() happens after the //array access above s_index[it] = j; clock_t end_time = clock(); //store the access latency s_tvalue[it] = end_time - start_time; } //All threads in this block have to reach this point //before continuing execution. __syncthreads(); //Transfer results from shared memory to global memory //Later we will memcpy() the device global memory to host for (int i = 0; i < iterations; i++) { device_index[i] = s_index[i]; device_tvalue[i] = s_tvalue[i]; } } int main() { printf("Will go through [%d] iterations with array of size N = [%d].\n", iterations, N); FILE * file; unsigned int *A = new unsigned int[N]; //The array of size N to test the cache unsigned int *host_tvalue = new unsigned int[iterations]; //Time values for memory accesses unsigned int *host_index = new unsigned int[iterations]; //Index array of the accesses to the array elements int hits = 0, misses = 0; //Initialize array for (int i = 0; i < N; i++) { A[i] = (i + s) % N; } //Initialize index and time value arrays for (int k = 0; k < iterations; k++) { host_tvalue[k] = 0; host_index[k] = 0; } cudaFuncSetCacheConfig(bench_CacheAccess, cudaFuncCachePreferL1); cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! 
Do you have a CUDA-capable GPU installed?"); return -1; } unsigned int *CUDA_A = 0; //When we allocate space for A on the GPU we assign it to this ptr, CUDA_A unsigned int *device_tvalue = 0; //Device variables needed to copy back to host. unsigned int *device_index = 0; //Places array into cache cudaStatus = cudaMalloc((void**)&CUDA_A, N * sizeof(unsigned int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); cudaDeviceReset(); //Clear all allocations and exit } //Places array into cache cudaStatus = cudaMalloc((void**)&device_tvalue, iterations * sizeof(unsigned int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed for the tvalues array!"); cudaDeviceReset(); //Clear all allocations and exit return -1; } //Places array into cache cudaStatus = cudaMalloc((void**)&device_index, iterations * sizeof(unsigned int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed for the index array!"); cudaDeviceReset(); //Clear all allocations and exit return -1; } for (int a = 0; a < 10; a++){ // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(CUDA_A, A, N * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed for the array!"); cudaDeviceReset(); return -1; } } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(device_index, host_index, iterations * sizeof(unsigned int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed for the index array!"); cudaDeviceReset(); return -1; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(device_tvalue, host_tvalue, iterations * sizeof(unsigned int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed for the tvalues array!"); cudaDeviceReset(); } // Classic P-chase benchmark. bench_CacheAccess<<<1,1>>>(CUDA_A, device_tvalue, device_index); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); return -1; } // cudadevicesynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudadevicesynchronize returned error code %d after launching kernel!\n", cudaStatus); return -1; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(host_tvalue, device_tvalue, iterations * sizeof(unsigned int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed! Could not retrieve tvalue from device.\n"); return -1; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(host_index, device_index, iterations * sizeof(unsigned int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed! Could not retrieve index from device.\n"); return -1; } //Dynamically allocate threshold. 
First access is always a cold miss int delta = (int) (tolerance * host_tvalue[0]); //If I don't hardcode threshold, calculate it if (threshold == 0) threshold = host_tvalue[0] - delta; file = fopen("experiment_results.dat","w"); if ((host_tvalue != NULL) && (host_index != NULL)) { for (int a = 0; a < iterations; a++) { if (host_tvalue[a] > threshold) misses++; else hits++; } } printf("%d hits\n%d misses\n", hits, misses); printf("threshold = %d\n", threshold); //printf("b = %d\n",((s*N)/(N-s*hits))); fprintf(file, "hits|misses\n"); fprintf(file, "%d|%d\n", hits, misses); fprintf(file, "threshold=%d\n", threshold); fprintf(file, "arraySize=%d\n", N); fprintf(file, "stride=%d\n", s); fprintf(file, "numIterations=%d\n", iterations); fprintf(file,"arrayIndex|tvalue\n"); for (int b = 0; b < iterations; b++) { //printf("host_index[%d] = %d\n",b,host_index[b]); //printf("host_tvalue[%d] = %d\n", b, host_tvalue[b]); fprintf(file,"%d|%d\n",host_index[b],host_tvalue[b]); } fprintf(file,"end\n"); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } fclose(file); //Call python GUI script to show results printf("path = %s\n",path); //+7 because of "python\s" char cmd[PATH_MAX + 7]; snprintf(cmd,sizeof(cmd),"python %s/../Python_Scripts/GUI.py", path); system(cmd); return 0; }
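Aside, a hedged sketch that is not part of either file in the pair above: the cache-footprint and iteration counts in this P-chase benchmark are all compile-time constants. The host-only snippet below merely reproduces that arithmetic for the values compiled in above (C = 6144, b = 0, s = 8), so the numbers can be checked without a GPU.

#include <cstdio>

int main() {
    const int C = 6144, b = 0, s = 8;
    const int N = 9 * 8 + C + (b * 5);                 // 6216 unsigned ints in the chase array
    const int iterations = ((N + (N % 2)) / s) * 4;    // four passes over the stride-s pointer chain
    printf("N = %d elements -> %zu bytes, stride = %d, iterations = %d\n",
           N, N * sizeof(unsigned int), s, iterations);
    return 0;
}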
047c41488a405bb8cbd2e780db08bfb73b673da6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/Dispatch.h> #include <THH/THHAtomics.cuh> #include <ATen/ATen.h> #include <torch/torch.h> #include <vector> #include <optional> /** * Friendly reminder of how multithreading works in CUDA: https://developer.nvidia.com/blog/even-easier-introduction-cuda * Check example at https://github.com/thomasw21/LinearTransformers/blob/main/model/attention/fast_weight/fast_weight_cuda.cu **/ // Available in pytorch main //#define DISPATCH_CASE_FLOATING_TYPES(...) \ // at::AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \ // at::AT_DISPATCH_CASE(at::ScalarType::Float, __VA_ARGS__) \ // at::AT_DISPATCH_CASE(at::ScalarType::Half, __VA_ARGS__) \ // at::AT_DISPATCH_CASE(at::ScalarType::BFloat16, __VA_ARGS__) \ /* * Forward passes */ /** * cast to fp32 if in fp16 + mask + softmax computation in fp32 + cast back to original dtype **/ template<typename attention_scores_scalar, int64_t min_kv_length_shard_size_per_thread> __global__ void forward_masked_softmax_kernel( const torch::PackedTensorAccessor32<attention_scores_scalar, 2, torch::RestrictPtrTraits> attention_scores, // [B, KV] const torch::PackedTensorAccessor32<bool, 2, torch::RestrictPtrTraits> mask, // [B, KV] torch::PackedTensorAccessor32<attention_scores_scalar, 2, torch::RestrictPtrTraits> result, // [B, KV] const int64_t effective_kv_length, const dim3 blockDim, const int64_t rows_per_block, const int64_t kv_length, const int64_t batch_size ) { const auto row_id = threadIdx.x / effective_kv_length; const auto effective_kv_length_id = threadIdx.x % effective_kv_length; const auto kv_length_start = effective_kv_length_id * min_kv_length_shard_size_per_thread; auto kv_length_end_ = (effective_kv_length_id + 1) * min_kv_length_shard_size_per_thread; kv_length_end_ = (kv_length_end_ > kv_length) ? kv_length : kv_length_end_; const auto kv_length_end = kv_length_end_; const auto batch_id = blockIdx.x * rows_per_block + row_id; // We need 2 float storage for each row, one for max computation, the other for normalizing exponential extern __shared__ float temp_storage[]; const auto row_id_mem_offset = row_id * 2; if (effective_kv_length_id == 0) { temp_storage[row_id_mem_offset] = -std::numeric_limits<float>::infinity(); temp_storage[row_id_mem_offset + 1] = 0; } __syncthreads(); // Compute mask and max if (batch_id < batch_size) { float thread_max = -std::numeric_limits<float>::infinity(); for (int kv_length_id = kv_length_start; kv_length_id < kv_length_end; ++kv_length_id) { if (mask[batch_id][kv_length_id] == 0) { const float candidate = attention_scores[batch_id][kv_length_id]; thread_max = (thread_max < candidate) ? 
candidate : thread_max; } } if (thread_max != -std::numeric_limits<float>::infinity()) { // TODO @thomasw21 with more memory we can probably compute a much faster `max-reduce` in parallel O(ln(n)) operations in each memory slot gpuAtomicMax(&temp_storage[row_id_mem_offset], thread_max); } } __syncthreads(); // Compute exp(elt - max) masked float exponential[min_kv_length_shard_size_per_thread]; if (batch_id < batch_size) { float thread_add = 0; for (int kv_length_id = kv_length_start; kv_length_id < kv_length_end; ++kv_length_id) { if (mask[batch_id][kv_length_id] == 0) { exponential[kv_length_id - kv_length_start] = ::exp(static_cast<float>(attention_scores[batch_id][kv_length_id]) - temp_storage[row_id_mem_offset]); thread_add = thread_add + exponential[kv_length_id - kv_length_start]; } else { exponential[kv_length_id - kv_length_start] = 0.; } } if (thread_add > 0) { // TODO @thomasw21 with more memory we can probably compute a much faster `sum-reduce` in parallel O(ln(n)) operations in each memory slot gpuAtomicAdd(&temp_storage[row_id_mem_offset + 1], thread_add); } } __syncthreads(); // Compute softmax if (batch_id < batch_size) { // If sum of all exponential is 0, we set the softmax values to 0 if (temp_storage[row_id_mem_offset + 1] == 0.) { for (int kv_length_id = kv_length_start; kv_length_id < kv_length_end; ++kv_length_id) { result[batch_id][kv_length_id] = 0.; } } else { for (int kv_length_id = kv_length_start; kv_length_id < kv_length_end; ++kv_length_id) { result[batch_id][kv_length_id] = static_cast<attention_scores_scalar>(exponential[kv_length_id - kv_length_start] / temp_storage[row_id_mem_offset + 1]); } } } } #define CHECK_CUDA(x) TORCH_CHECK(x.device().is_cuda(), #x " must be a CUDA tensor") #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) std::tuple<at::Tensor, std::optional<std::vector<at::Tensor>>, at::Tensor> forward( const at::Tensor query, const at::Tensor query_pass, const at::Tensor key, const at::Tensor key_pass, const at::Tensor value, const std::optional<std::vector<at::Tensor>> layer_past, const at::Tensor attention_mask, const std::optional<at::Tensor> head_mask, const float inv_norm_factor, const int num_heads, const bool use_cache ) { auto query_layer = at::cat({query, query_pass}, 3); auto key_layer = at::cat({key, key_pass}, 3); auto value_layer = value; if (layer_past) { const auto past_key = (*layer_past).at(0); const auto past_value = (*layer_past).at(1); key_layer = at::cat({past_key, key_layer}, 2); value_layer = at::cat({past_value, value_layer}, 2); } std::optional<std::vector<at::Tensor>> present; if (use_cache) { present = {key_layer, value_layer}; } else { present = {}; } const auto batch_size = query_layer.size(0); const auto q_length = query_layer.size(2); const auto attn_head_size = query_layer.size(3); const auto batch_size_times_num_heads = batch_size * num_heads; const auto kv_length = key_layer.size(2); const auto query_view = query_layer.reshape({batch_size_times_num_heads, q_length, attn_head_size}); auto key_view = key_layer.reshape({batch_size_times_num_heads, kv_length, attn_head_size}).transpose(1, 2); auto value_view = value_layer.reshape({batch_size_times_num_heads, kv_length, attn_head_size}); auto query_scaled = query_view * inv_norm_factor; auto attention_scores = at::bmm(query_scaled, key_view); // Computing `optionally_cast_fp16_to_fp32 + masked_fill + softmax + cast_to_intial_dtype` at::Tensor attention_probs; if (true) { // 
TODO @thomasw21: it's easier to think of attention_scores as 2D tensors const auto attention_scores_2d = attention_scores.view({batch_size_times_num_heads * q_length, kv_length}); const auto attention_mask_2d = attention_mask.view({batch_size_times_num_heads * q_length, kv_length}); // Custom kernel attention_probs = at::empty_like(attention_scores_2d); // Check that inputs and contiguous + cuda tensors CHECK_INPUT(attention_scores_2d); CHECK_INPUT(attention_mask_2d); // TODO @thomas21: change by to this as it's cleaner when pytorch 1.13 comes out // DISPATCH_CASE_FLOATING_TYPES(attention_scores.scalar_type(), "masked_softmax", [&] { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, attention_scores.scalar_type(), "masked_softmax", [&] { /* * Understanding how GPUs work: https://developer.nvidia.com/blog/cuda-refresher-cuda-programming-model/ * A100 specifications: https://images.nvidia.com/aem-dam/en-zz/Solutions/data-center/nvidia-ampere-architecture-whitepaper.pdf * - SMs: 108 * - TPCs: 56 (What's that?) * - Memory size: 40 GB * - L2 Cache size: 40960 KB (shared across all SMs) * - L1/Shared memory size: 192 KB (shared across all threads within a SM) * - Max Threads / SM: 2048 * - Max Thread Blocks / SM: 32 */ /* * We should split [batch_size_times_num_heads_block, q_length] in seperate blocks and [batch_size_times_num_heads_block_size, kv_length] a single block * with multiple threads as we need to `sync_threads` to run exponential sum. * We maximise the usage of threads within a single block */ // TODO @thomasw21 figure out everything warp related: // - why do they have to be power of 2 // TODO @thomas21 check why everyone is setting 1024 when officially it's 2048 const auto MAX_THREADS_PER_SM = 1024; // TODO @thomasw21 figure out how to have longer sequences, currently the maximum is `max_kv_length = MAX_THREADS_PER_SM * MIN_KV_LENGTH_SHARD_SIZE_PER_THREAD` const auto MIN_KV_LENGTH_SHARD_SIZE_PER_THREAD = 4; // `effective_kv_length = ceil(kv_length / MIN_KV_LENGTH_SHARD_SIZE_PER_THREAD)` const auto effective_kv_length = (kv_length - 1)/ MIN_KV_LENGTH_SHARD_SIZE_PER_THREAD + 1; const auto rows_per_block = MAX_THREADS_PER_SM / effective_kv_length; const auto num_blocks = (batch_size_times_num_heads * q_length - 1) / rows_per_block + 1; const dim3 gridDim(num_blocks); // Number of blocks that run const dim3 blockDim(MAX_THREADS_PER_SM); // Number of threads that run per block const int shared_mem_forward = rows_per_block * 2 * sizeof(float); // 192 * 2 ** 10 // const auto MAX_L1_MEMORY = 196608; // const auto MAX_SMs = 108; // TORCH_CHECK(batch_size_times_num_heads * q_length <= MAX_L1_MEMORY, "Shared memory exceeds 192KB limitation."); // TORCH_CHECK(gridDim.x * gridDim.y * gridDim.z <= MAX_SMs, "A100s only have 108 SMs. Raising as require blocks is bigger."); // TORCH_CHECK(blockDim.x * blockDim.y * blockDim.z <= MAX_THREADS_PER_SM, "A100s only have 2048 threads per block. 
Raising as require requested threads is higher."); hipLaunchKernelGGL(( forward_masked_softmax_kernel<scalar_t, MIN_KV_LENGTH_SHARD_SIZE_PER_THREAD>), dim3(gridDim), dim3(blockDim), shared_mem_forward, 0, attention_scores_2d.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), attention_mask_2d.packed_accessor32<bool, 2, torch::RestrictPtrTraits>(), attention_probs.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), effective_kv_length, blockDim, rows_per_block, kv_length, batch_size_times_num_heads * q_length ); }); attention_probs = attention_probs.view({batch_size_times_num_heads, q_length, kv_length}); } else { // Pytorch C++ API auto input_dtype = attention_scores.scalar_type(); if (input_dtype == at::ScalarType::Float) { attention_scores = attention_scores.to(at::ScalarType::Float); }; // TODO @thomasw21 Figure out how to get minimum value auto attn_weights = attention_scores.masked_fill_(attention_mask, -1e34); attention_probs = attn_weights.softmax(-1, at::ScalarType::Float).to(input_dtype); } auto context_layer = attention_probs.bmm(value_view); // `_merge_heads` context_layer = context_layer.view({batch_size, num_heads, q_length, attn_head_size}); context_layer = context_layer.permute({0, 2, 1, 3}); context_layer = context_layer.reshape({batch_size, q_length, attn_head_size * num_heads}); return std::make_tuple(context_layer, present, attention_probs); } PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def( "forward", &forward, "GPT-Neox attention mechanism forward (CUDA)" ); }
047c41488a405bb8cbd2e780db08bfb73b673da6.cu
#include <ATen/Dispatch.h> #include <THC/THCAtomics.cuh> #include <ATen/ATen.h> #include <torch/torch.h> #include <vector> #include <optional> /** * Friendly reminder of how multithreading works in CUDA: https://developer.nvidia.com/blog/even-easier-introduction-cuda * Check example at https://github.com/thomasw21/LinearTransformers/blob/main/model/attention/fast_weight/fast_weight_cuda.cu **/ // Available in pytorch main //#define DISPATCH_CASE_FLOATING_TYPES(...) \ // at::AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \ // at::AT_DISPATCH_CASE(at::ScalarType::Float, __VA_ARGS__) \ // at::AT_DISPATCH_CASE(at::ScalarType::Half, __VA_ARGS__) \ // at::AT_DISPATCH_CASE(at::ScalarType::BFloat16, __VA_ARGS__) \ /* * Forward passes */ /** * cast to fp32 if in fp16 + mask + softmax computation in fp32 + cast back to original dtype **/ template<typename attention_scores_scalar, int64_t min_kv_length_shard_size_per_thread> __global__ void forward_masked_softmax_kernel( const torch::PackedTensorAccessor32<attention_scores_scalar, 2, torch::RestrictPtrTraits> attention_scores, // [B, KV] const torch::PackedTensorAccessor32<bool, 2, torch::RestrictPtrTraits> mask, // [B, KV] torch::PackedTensorAccessor32<attention_scores_scalar, 2, torch::RestrictPtrTraits> result, // [B, KV] const int64_t effective_kv_length, const dim3 blockDim, const int64_t rows_per_block, const int64_t kv_length, const int64_t batch_size ) { const auto row_id = threadIdx.x / effective_kv_length; const auto effective_kv_length_id = threadIdx.x % effective_kv_length; const auto kv_length_start = effective_kv_length_id * min_kv_length_shard_size_per_thread; auto kv_length_end_ = (effective_kv_length_id + 1) * min_kv_length_shard_size_per_thread; kv_length_end_ = (kv_length_end_ > kv_length) ? kv_length : kv_length_end_; const auto kv_length_end = kv_length_end_; const auto batch_id = blockIdx.x * rows_per_block + row_id; // We need 2 float storage for each row, one for max computation, the other for normalizing exponential extern __shared__ float temp_storage[]; const auto row_id_mem_offset = row_id * 2; if (effective_kv_length_id == 0) { temp_storage[row_id_mem_offset] = -std::numeric_limits<float>::infinity(); temp_storage[row_id_mem_offset + 1] = 0; } __syncthreads(); // Compute mask and max if (batch_id < batch_size) { float thread_max = -std::numeric_limits<float>::infinity(); for (int kv_length_id = kv_length_start; kv_length_id < kv_length_end; ++kv_length_id) { if (mask[batch_id][kv_length_id] == 0) { const float candidate = attention_scores[batch_id][kv_length_id]; thread_max = (thread_max < candidate) ? 
candidate : thread_max; } } if (thread_max != -std::numeric_limits<float>::infinity()) { // TODO @thomasw21 with more memory we can probably compute a much faster `max-reduce` in parallel O(ln(n)) operations in each memory slot gpuAtomicMax(&temp_storage[row_id_mem_offset], thread_max); } } __syncthreads(); // Compute exp(elt - max) masked float exponential[min_kv_length_shard_size_per_thread]; if (batch_id < batch_size) { float thread_add = 0; for (int kv_length_id = kv_length_start; kv_length_id < kv_length_end; ++kv_length_id) { if (mask[batch_id][kv_length_id] == 0) { exponential[kv_length_id - kv_length_start] = std::exp(static_cast<float>(attention_scores[batch_id][kv_length_id]) - temp_storage[row_id_mem_offset]); thread_add = thread_add + exponential[kv_length_id - kv_length_start]; } else { exponential[kv_length_id - kv_length_start] = 0.; } } if (thread_add > 0) { // TODO @thomasw21 with more memory we can probably compute a much faster `sum-reduce` in parallel O(ln(n)) operations in each memory slot gpuAtomicAdd(&temp_storage[row_id_mem_offset + 1], thread_add); } } __syncthreads(); // Compute softmax if (batch_id < batch_size) { // If sum of all exponential is 0, we set the softmax values to 0 if (temp_storage[row_id_mem_offset + 1] == 0.) { for (int kv_length_id = kv_length_start; kv_length_id < kv_length_end; ++kv_length_id) { result[batch_id][kv_length_id] = 0.; } } else { for (int kv_length_id = kv_length_start; kv_length_id < kv_length_end; ++kv_length_id) { result[batch_id][kv_length_id] = static_cast<attention_scores_scalar>(exponential[kv_length_id - kv_length_start] / temp_storage[row_id_mem_offset + 1]); } } } } #define CHECK_CUDA(x) TORCH_CHECK(x.device().is_cuda(), #x " must be a CUDA tensor") #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) std::tuple<at::Tensor, std::optional<std::vector<at::Tensor>>, at::Tensor> forward( const at::Tensor query, const at::Tensor query_pass, const at::Tensor key, const at::Tensor key_pass, const at::Tensor value, const std::optional<std::vector<at::Tensor>> layer_past, const at::Tensor attention_mask, const std::optional<at::Tensor> head_mask, const float inv_norm_factor, const int num_heads, const bool use_cache ) { auto query_layer = at::cat({query, query_pass}, 3); auto key_layer = at::cat({key, key_pass}, 3); auto value_layer = value; if (layer_past) { const auto past_key = (*layer_past).at(0); const auto past_value = (*layer_past).at(1); key_layer = at::cat({past_key, key_layer}, 2); value_layer = at::cat({past_value, value_layer}, 2); } std::optional<std::vector<at::Tensor>> present; if (use_cache) { present = {key_layer, value_layer}; } else { present = {}; } const auto batch_size = query_layer.size(0); const auto q_length = query_layer.size(2); const auto attn_head_size = query_layer.size(3); const auto batch_size_times_num_heads = batch_size * num_heads; const auto kv_length = key_layer.size(2); const auto query_view = query_layer.reshape({batch_size_times_num_heads, q_length, attn_head_size}); auto key_view = key_layer.reshape({batch_size_times_num_heads, kv_length, attn_head_size}).transpose(1, 2); auto value_view = value_layer.reshape({batch_size_times_num_heads, kv_length, attn_head_size}); auto query_scaled = query_view * inv_norm_factor; auto attention_scores = at::bmm(query_scaled, key_view); // Computing `optionally_cast_fp16_to_fp32 + masked_fill + softmax + cast_to_intial_dtype` at::Tensor attention_probs; if (true) { // 
TODO @thomasw21: it's easier to think of attention_scores as 2D tensors const auto attention_scores_2d = attention_scores.view({batch_size_times_num_heads * q_length, kv_length}); const auto attention_mask_2d = attention_mask.view({batch_size_times_num_heads * q_length, kv_length}); // Custom kernel attention_probs = at::empty_like(attention_scores_2d); // Check that inputs and contiguous + cuda tensors CHECK_INPUT(attention_scores_2d); CHECK_INPUT(attention_mask_2d); // TODO @thomas21: change by to this as it's cleaner when pytorch 1.13 comes out // DISPATCH_CASE_FLOATING_TYPES(attention_scores.scalar_type(), "masked_softmax", [&] { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, attention_scores.scalar_type(), "masked_softmax", [&] { /* * Understanding how GPUs work: https://developer.nvidia.com/blog/cuda-refresher-cuda-programming-model/ * A100 specifications: https://images.nvidia.com/aem-dam/en-zz/Solutions/data-center/nvidia-ampere-architecture-whitepaper.pdf * - SMs: 108 * - TPCs: 56 (What's that?) * - Memory size: 40 GB * - L2 Cache size: 40960 KB (shared across all SMs) * - L1/Shared memory size: 192 KB (shared across all threads within a SM) * - Max Threads / SM: 2048 * - Max Thread Blocks / SM: 32 */ /* * We should split [batch_size_times_num_heads_block, q_length] in seperate blocks and [batch_size_times_num_heads_block_size, kv_length] a single block * with multiple threads as we need to `sync_threads` to run exponential sum. * We maximise the usage of threads within a single block */ // TODO @thomasw21 figure out everything warp related: // - why do they have to be power of 2 // TODO @thomas21 check why everyone is setting 1024 when officially it's 2048 const auto MAX_THREADS_PER_SM = 1024; // TODO @thomasw21 figure out how to have longer sequences, currently the maximum is `max_kv_length = MAX_THREADS_PER_SM * MIN_KV_LENGTH_SHARD_SIZE_PER_THREAD` const auto MIN_KV_LENGTH_SHARD_SIZE_PER_THREAD = 4; // `effective_kv_length = ceil(kv_length / MIN_KV_LENGTH_SHARD_SIZE_PER_THREAD)` const auto effective_kv_length = (kv_length - 1)/ MIN_KV_LENGTH_SHARD_SIZE_PER_THREAD + 1; const auto rows_per_block = MAX_THREADS_PER_SM / effective_kv_length; const auto num_blocks = (batch_size_times_num_heads * q_length - 1) / rows_per_block + 1; const dim3 gridDim(num_blocks); // Number of blocks that run const dim3 blockDim(MAX_THREADS_PER_SM); // Number of threads that run per block const int shared_mem_forward = rows_per_block * 2 * sizeof(float); // 192 * 2 ** 10 // const auto MAX_L1_MEMORY = 196608; // const auto MAX_SMs = 108; // TORCH_CHECK(batch_size_times_num_heads * q_length <= MAX_L1_MEMORY, "Shared memory exceeds 192KB limitation."); // TORCH_CHECK(gridDim.x * gridDim.y * gridDim.z <= MAX_SMs, "A100s only have 108 SMs. Raising as require blocks is bigger."); // TORCH_CHECK(blockDim.x * blockDim.y * blockDim.z <= MAX_THREADS_PER_SM, "A100s only have 2048 threads per block. 
Raising as require requested threads is higher."); forward_masked_softmax_kernel<scalar_t, MIN_KV_LENGTH_SHARD_SIZE_PER_THREAD><<<gridDim, blockDim, shared_mem_forward>>>( attention_scores_2d.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), attention_mask_2d.packed_accessor32<bool, 2, torch::RestrictPtrTraits>(), attention_probs.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), effective_kv_length, blockDim, rows_per_block, kv_length, batch_size_times_num_heads * q_length ); }); attention_probs = attention_probs.view({batch_size_times_num_heads, q_length, kv_length}); } else { // Pytorch C++ API auto input_dtype = attention_scores.scalar_type(); if (input_dtype == at::ScalarType::Float) { attention_scores = attention_scores.to(at::ScalarType::Float); }; // TODO @thomasw21 Figure out how to get minimum value auto attn_weights = attention_scores.masked_fill_(attention_mask, -1e34); attention_probs = attn_weights.softmax(-1, at::ScalarType::Float).to(input_dtype); } auto context_layer = attention_probs.bmm(value_view); // `_merge_heads` context_layer = context_layer.view({batch_size, num_heads, q_length, attn_head_size}); context_layer = context_layer.permute({0, 2, 1, 3}); context_layer = context_layer.reshape({batch_size, q_length, attn_head_size * num_heads}); return std::make_tuple(context_layer, present, attention_probs); } PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def( "forward", &forward, "GPT-Neox attention mechanism forward (CUDA)" ); }
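Aside, a hedged sketch that is not part of either file in the pair above: the launch geometry in forward() is derived from MAX_THREADS_PER_SM = 1024 and a shard of 4 kv elements per thread. The host-only snippet below reproduces that arithmetic for made-up tensor sizes, so the block, grid, and shared-memory numbers can be sanity-checked without building the extension.

#include <cstdio>

int main() {
    const long MAX_THREADS_PER_SM = 1024, SHARD = 4;               // as in the launch code above
    const long batch_size_times_num_heads = 16 * 32;               // illustrative: batch 16, 32 heads
    const long q_length = 128, kv_length = 2048;                   // illustrative sequence lengths
    const long effective_kv_length = (kv_length - 1) / SHARD + 1;  // ceil(2048/4)   = 512
    const long rows_per_block = MAX_THREADS_PER_SM / effective_kv_length;      // 1024/512      = 2
    const long rows_total = batch_size_times_num_heads * q_length;             // 512*128       = 65536
    const long num_blocks = (rows_total - 1) / rows_per_block + 1;             // ceil(65536/2) = 32768
    const long shared_mem_forward = rows_per_block * 2 * (long)sizeof(float);  // 2*2*4         = 16 bytes
    printf("effective_kv_length=%ld rows_per_block=%ld num_blocks=%ld shared_mem=%ld bytes\n",
           effective_kv_length, rows_per_block, num_blocks, shared_mem_forward);
    return 0;
}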
e87ca7481e46e1721b60ac631a40d02cc1389f04.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> __global__ void simpleKernel(int *data) { // this adds a value to a variable stored in global memory data[threadIdx.x] += 2 * (blockIdx.x + threadIdx.x); } int main() { const int numElems = 4; int hostArray[numElems], *devArray; // allocate memory on the device; zero out all entries in this device array hipMalloc((void **)&devArray, sizeof(int) * numElems); hipMemset(devArray, 0, numElems * sizeof(int)); // invoke GPU kernel, with one block that has four threads hipLaunchKernelGGL(( simpleKernel), dim3(1), dim3(numElems), 0, 0, devArray); // bring the result back from the GPU into the hostArray hipMemcpy(&hostArray, devArray, sizeof(int) * numElems, hipMemcpyDeviceToHost); // print out the result to confirm that things are looking good printf("Values stored in hostArray: \n"); for (int i = 0; i < numElems; i++) printf("%d\n", hostArray[i]); // release the memory allocated on the GPU hipFree(devArray); return 0; }
e87ca7481e46e1721b60ac631a40d02cc1389f04.cu
#include <cuda.h> #include <stdio.h> __global__ void simpleKernel(int *data) { // this adds a value to a variable stored in global memory data[threadIdx.x] += 2 * (blockIdx.x + threadIdx.x); } int main() { const int numElems = 4; int hostArray[numElems], *devArray; // allocate memory on the device; zero out all entries in this device array cudaMalloc((void **)&devArray, sizeof(int) * numElems); cudaMemset(devArray, 0, numElems * sizeof(int)); // invoke GPU kernel, with one block that has four threads simpleKernel<<<1, numElems>>>(devArray); // bring the result back from the GPU into the hostArray cudaMemcpy(&hostArray, devArray, sizeof(int) * numElems, cudaMemcpyDeviceToHost); // print out the result to confirm that things are looking good printf("Values stored in hostArray: \n"); for (int i = 0; i < numElems; i++) printf("%d\n", hostArray[i]); // release the memory allocated on the GPU cudaFree(devArray); return 0; }
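Aside, a hedged sketch that is not part of either file in the pair above: the minimal example skips error checking entirely. A common pattern, similar in spirit to the gpuErrchk and checkCudaKernel helpers used by other files in this collection, wraps every runtime call in a macro; the macro name below is illustrative and not taken from the original file.

#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

// Abort with a readable message if any CUDA runtime call fails.
#define CUDA_CHECK(call)                                                     \
    do {                                                                     \
        cudaError_t err_ = (call);                                           \
        if (err_ != cudaSuccess) {                                           \
            fprintf(stderr, "CUDA error '%s' at %s:%d\n",                    \
                    cudaGetErrorString(err_), __FILE__, __LINE__);           \
            exit(EXIT_FAILURE);                                              \
        }                                                                    \
    } while (0)

int main() {
    const int numElems = 4;
    int *devArray = nullptr;
    CUDA_CHECK(cudaMalloc((void **)&devArray, numElems * sizeof(int)));
    CUDA_CHECK(cudaMemset(devArray, 0, numElems * sizeof(int)));
    CUDA_CHECK(cudaFree(devArray));
    return 0;
}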
retinaface_decode.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <common/cuda_tools.hpp> namespace RetinaFace{ static const int NUM_BOX_ELEMENT = 16; __constant__ float variances[] = {0.1f, 0.2f}; static __device__ void affine_project(float* matrix, float x, float y, float* ox, float* oy){ *ox = matrix[0] * x + matrix[1] * y + matrix[2]; *oy = matrix[3] * x + matrix[4] * y + matrix[5]; } static __device__ float sigmoid(float x){ return 1.0f / (1.0f + exp(-x)); } static __global__ void decode_kernel( float* predict, int num_bboxes, float deconfidence_threshold, float nms_threshold, float* invert_affine_matrix, float* parray, int max_objects, float* prior_array ){ int position = blockDim.x * blockIdx.x + threadIdx.x; if (position >= num_bboxes) return; float* pitem = predict + 16 * position; // cx, cy, w, h, neg_conf, pos_conf, landmark0.x, landmark0.y, landmark1.x, landmark1.y, landmark2.x, landmark2.y float neg_deconfidence = pitem[4]; float pos_deconfidence = pitem[5]; float object_deconfidence = (pos_deconfidence - neg_deconfidence); if(object_deconfidence < deconfidence_threshold) return; int index = atomicAdd(parray, 1); if(index >= max_objects) return; float* prior = prior_array + 4 * position; float cx = prior[0] + pitem[0] * variances[0] * prior[2]; float cy = prior[1] + pitem[1] * variances[0] * prior[3]; float width = prior[2] * exp(pitem[2] * variances[1]); float height = prior[3] * exp(pitem[3] * variances[1]); float left = cx - width * 0.5f; float top = cy - height * 0.5f; float right = cx + width * 0.5f; float bottom = cy + height * 0.5f; affine_project(invert_affine_matrix, left, top, &left, &top); affine_project(invert_affine_matrix, right, bottom, &right, &bottom); float* pout_item = parray + 1 + index * NUM_BOX_ELEMENT; *pout_item++ = left; *pout_item++ = top; *pout_item++ = right; *pout_item++ = bottom; *pout_item++ = sigmoid(object_deconfidence); *pout_item++ = 0; float* landmark_predict = pitem + 6; for(int i = 0; i < 5; ++i){ float x = prior[0] + landmark_predict[0] * variances[0] * prior[2]; float y = prior[1] + landmark_predict[1] * variances[0] * prior[3]; affine_project(invert_affine_matrix, x, y, pout_item + 0, pout_item + 1); pout_item += 2; landmark_predict += 2; } } static __device__ float box_iou( float aleft, float atop, float aright, float abottom, float bleft, float btop, float bright, float bbottom ){ float cleft = max(aleft, bleft); float ctop = max(atop, btop); float cright = min(aright, bright); float cbottom = min(abottom, bbottom); float c_area = max(cright - cleft, 0.0f) * max(cbottom - ctop, 0.0f); if(c_area == 0.0f) return 0.0f; float a_area = max(0.0f, aright - aleft) * max(0.0f, abottom - atop); float b_area = max(0.0f, bright - bleft) * max(0.0f, bbottom - btop); return c_area / (a_area + b_area - c_area); } static __global__ void nms_kernel(float* bboxes, int max_objects, float threshold){ int position = (blockDim.x * blockIdx.x + threadIdx.x); int count = min((int)*bboxes, max_objects); if (position >= count) return; float* pcurrent = bboxes + 1 + position * NUM_BOX_ELEMENT; for(int i = 0; i < count; ++i){ float* pitem = bboxes + 1 + i * NUM_BOX_ELEMENT; if(i == position || pcurrent[5] != pitem[5]) continue; float iou = box_iou( pcurrent[0], pcurrent[1], pcurrent[2], pcurrent[3], pitem[0], pitem[1], pitem[2], pitem[3] ); if(iou > threshold){ if(pitem[4] > pcurrent[4]){ // if the IoU is large and box b has higher confidence than box a (b is the i-th box, a is the current box) // then the current box is filtered out and does not need to be kept pcurrent[5] = -1; return; } } } } static float desigmoid(float x){ return -log(1.0f / x - 1.0f); } void decode_kernel_invoker(
float* predict, int num_bboxes, float confidence_threshold, float nms_threshold, float* invert_affine_matrix, float* parray, int max_objects, float* prior, hipStream_t stream ){ auto grid = CUDATools::grid_dims(num_bboxes); auto block = CUDATools::block_dims(num_bboxes); hipLaunchKernelGGL(( checkCudaKernel(decode_kernel), dim3(grid), dim3(block), 0, stream, predict, num_bboxes, desigmoid(confidence_threshold), nms_threshold, invert_affine_matrix, parray, max_objects, prior )); grid = CUDATools::grid_dims(max_objects); block = CUDATools::block_dims(max_objects); hipLaunchKernelGGL(( checkCudaKernel(nms_kernel), dim3(grid), dim3(block), 0, stream, parray, max_objects, nms_threshold)); } };
retinaface_decode.cu
#include <common/cuda_tools.hpp> namespace RetinaFace{ static const int NUM_BOX_ELEMENT = 16; __constant__ float variances[] = {0.1f, 0.2f}; static __device__ void affine_project(float* matrix, float x, float y, float* ox, float* oy){ *ox = matrix[0] * x + matrix[1] * y + matrix[2]; *oy = matrix[3] * x + matrix[4] * y + matrix[5]; } static __device__ float sigmoid(float x){ return 1.0f / (1.0f + exp(-x)); } static __global__ void decode_kernel( float* predict, int num_bboxes, float deconfidence_threshold, float nms_threshold, float* invert_affine_matrix, float* parray, int max_objects, float* prior_array ){ int position = blockDim.x * blockIdx.x + threadIdx.x; if (position >= num_bboxes) return; float* pitem = predict + 16 * position; // cx, cy, w, h, neg_conf, pos_conf, landmark0.x, landmark0.y, landmark1.x, landmark1.y, landmark2.x, landmark2.y float neg_deconfidence = pitem[4]; float pos_deconfidence = pitem[5]; float object_deconfidence = (pos_deconfidence - neg_deconfidence); if(object_deconfidence < deconfidence_threshold) return; int index = atomicAdd(parray, 1); if(index >= max_objects) return; float* prior = prior_array + 4 * position; float cx = prior[0] + pitem[0] * variances[0] * prior[2]; float cy = prior[1] + pitem[1] * variances[0] * prior[3]; float width = prior[2] * exp(pitem[2] * variances[1]); float height = prior[3] * exp(pitem[3] * variances[1]); float left = cx - width * 0.5f; float top = cy - height * 0.5f; float right = cx + width * 0.5f; float bottom = cy + height * 0.5f; affine_project(invert_affine_matrix, left, top, &left, &top); affine_project(invert_affine_matrix, right, bottom, &right, &bottom); float* pout_item = parray + 1 + index * NUM_BOX_ELEMENT; *pout_item++ = left; *pout_item++ = top; *pout_item++ = right; *pout_item++ = bottom; *pout_item++ = sigmoid(object_deconfidence); *pout_item++ = 0; float* landmark_predict = pitem + 6; for(int i = 0; i < 5; ++i){ float x = prior[0] + landmark_predict[0] * variances[0] * prior[2]; float y = prior[1] + landmark_predict[1] * variances[0] * prior[3]; affine_project(invert_affine_matrix, x, y, pout_item + 0, pout_item + 1); pout_item += 2; landmark_predict += 2; } } static __device__ float box_iou( float aleft, float atop, float aright, float abottom, float bleft, float btop, float bright, float bbottom ){ float cleft = max(aleft, bleft); float ctop = max(atop, btop); float cright = min(aright, bright); float cbottom = min(abottom, bbottom); float c_area = max(cright - cleft, 0.0f) * max(cbottom - ctop, 0.0f); if(c_area == 0.0f) return 0.0f; float a_area = max(0.0f, aright - aleft) * max(0.0f, abottom - atop); float b_area = max(0.0f, bright - bleft) * max(0.0f, bbottom - btop); return c_area / (a_area + b_area - c_area); } static __global__ void nms_kernel(float* bboxes, int max_objects, float threshold){ int position = (blockDim.x * blockIdx.x + threadIdx.x); int count = min((int)*bboxes, max_objects); if (position >= count) return; float* pcurrent = bboxes + 1 + position * NUM_BOX_ELEMENT; for(int i = 0; i < count; ++i){ float* pitem = bboxes + 1 + i * NUM_BOX_ELEMENT; if(i == position || pcurrent[5] != pitem[5]) continue; float iou = box_iou( pcurrent[0], pcurrent[1], pcurrent[2], pcurrent[3], pitem[0], pitem[1], pitem[2], pitem[3] ); if(iou > threshold){ if(pitem[4] > pcurrent[4]){ // if the IoU is large and box b has higher confidence than box a (b is the i-th box, a is the current box) // then the current box is filtered out and does not need to be kept pcurrent[5] = -1; return; } } } } static float desigmoid(float x){ return -log(1.0f / x - 1.0f); } void decode_kernel_invoker( float* predict, int num_bboxes, float
confidence_threshold, float nms_threshold, float* invert_affine_matrix, float* parray, int max_objects, float* prior, cudaStream_t stream ){ auto grid = CUDATools::grid_dims(num_bboxes); auto block = CUDATools::block_dims(num_bboxes); checkCudaKernel(decode_kernel<<<grid, block, 0, stream>>>( predict, num_bboxes, desigmoid(confidence_threshold), nms_threshold, invert_affine_matrix, parray, max_objects, prior )); grid = CUDATools::grid_dims(max_objects); block = CUDATools::block_dims(max_objects); checkCudaKernel(nms_kernel<<<grid, block, 0, stream>>>(parray, max_objects, nms_threshold)); } };
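Aside, a hedged sketch that is not part of either file in the pair above: the prior-box decode in decode_kernel applies variances = {0.1, 0.2} to the regression output before the affine back-projection. The host-only snippet below works that arithmetic for one made-up prior/prediction pair; all numbers are illustrative.

#include <cmath>
#include <cstdio>

int main() {
    const float variances[2] = {0.1f, 0.2f};              // same constants as the __constant__ array above
    const float prior[4] = {0.50f, 0.50f, 0.20f, 0.30f};  // anchor cx, cy, w, h (illustrative)
    const float pred[4]  = {1.00f, -0.50f, 0.20f, 0.10f}; // network regression output (illustrative)
    float cx = prior[0] + pred[0] * variances[0] * prior[2];  // 0.5 + 1.0*0.1*0.2 = 0.52
    float cy = prior[1] + pred[1] * variances[0] * prior[3];  // 0.5 - 0.5*0.1*0.3 = 0.485
    float w  = prior[2] * std::exp(pred[2] * variances[1]);   // 0.2 * exp(0.04)
    float h  = prior[3] * std::exp(pred[3] * variances[1]);   // 0.3 * exp(0.02)
    printf("decoded box: left=%.4f top=%.4f right=%.4f bottom=%.4f\n",
           cx - 0.5f * w, cy - 0.5f * h, cx + 0.5f * w, cy + 0.5f * h);
    return 0;
}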
5f15f95f380e40dd4a0d435af6af24a20af59aeb.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <hip/hip_runtime.h> #include <assert.h> #include <stdio.h> #include "globals.hpp" //////////////////////////////////////////////////////////////////////////////// // export the callable function extern "C" void compute_v4(float* reference, float* input_image, aoi* aoi_coordinates, float* parallelCoeffs, int* parallelSW, unsigned int image_height, unsigned int image_width); __device__ void thread_allocator_v4(float* input_image, aoi* aoi_coordinates, unsigned int image_width, unsigned int sn, aoi* thread_state, float* image_parts) { // thread allocator unsigned int ta = blockIdx.x; unsigned int thx = threadIdx.x; /// output decoding unsigned int offset = ta*(image_width/N); unsigned int local_end = thread_state->end - thread_state->start; if (thx <= local_end) { image_parts[sn*image_width + offset + thx] = input_image[sn*image_width + thread_state->start + thx]; } if (thx == local_end + 1) image_parts[sn*image_width + offset + thx] = -1; // to terminate the image part /// next state __syncthreads(); if (thx==0) if ((aoi_coordinates + sn * N + ta)->start != -1) *thread_state = aoi_coordinates[sn * N + ta]; __syncthreads(); /// output decoding } __device__ float preproc_image_v4(float* image_parts, unsigned int image_width) { unsigned int noz = blockIdx.x; float imgPreproc2DDFA = 0; float* imgpre = image_parts + (image_width / N) * noz; while (*(imgpre) != -1) { imgPreproc2DDFA += WHITE_VALUE - *(imgpre); imgpre++; } return imgPreproc2DDFA; } __device__ void auto_correlate_v4(float imgPreproc2DDFA, int* parallelSW, unsigned int sn, float* ac_samples, int* ac_sw, int ac_ignore_it, float* ac_sampWin, float* autoCorrToCombSubMul) { unsigned int noz = blockIdx.x; unsigned int thx = threadIdx.x; //// output decoding if (thx<2*(*ac_sw)) ac_sampWin[thx] = ac_samples[ac_ignore_it + thx]; if (thx==0) ac_sampWin[2*(*ac_sw)] = ac_samples[ac_ignore_it + 2*(*ac_sw)]; __syncthreads(); if (thx<2* (*ac_sw)) for (int c = 0; c <= (*ac_sw) * 2; c++) { int d = c + (thx-(*ac_sw)) + 1; int k = (thx-(*ac_sw)) + (*ac_sw); if ((d >= 0) && (d < (*ac_sw) * 2)) autoCorrToCombSubMul[(*ac_sw) * 2 - k - 1] += ac_sampWin[c] * ac_sampWin[d]; } __syncthreads(); //// next state int ac_temp = parallelSW[sn * N + noz]; if (ac_temp != -1) (*ac_sw) = ac_temp; __shared__ float ac_samples_temp[BUFFER_SIZE]; if (thx<BUFFER_SIZE) ac_samples_temp[thx] = ac_samples[thx+1]; __syncthreads(); if (thx<BUFFER_SIZE) ac_samples[thx] = ac_samples_temp[thx]; if (thx==0) ac_samples[BUFFER_SIZE] = imgPreproc2DDFA; } __device__ void cross_correlate_v4(int ac_ignore_it, int* ac_sw, float* ac_sampWin, float* cc_coefs, float* parallelCoeffs, unsigned int sn, float* xCorrToCombSubMul) { unsigned int noz = blockIdx.x; unsigned int thx = threadIdx.x; //// output decoding __syncthreads(); if (thx<2*(*ac_sw)) for (int c = 0; c <= (*ac_sw) * 2; c++) { int d = c + (thx-(*ac_sw)) + 1; int k = (thx-(*ac_sw)) + (*ac_sw); if ((d >= 0) && (d < (*ac_sw) * 2)) xCorrToCombSubMul[(*ac_sw) * 2 - k - 1] += ac_sampWin[c] * cc_coefs[ac_ignore_it + d]; } __syncthreads(); //// next state float* cc_temp = parallelCoeffs + sn 
* N * BUFFER_SIZE + noz * BUFFER_SIZE; if (*cc_temp != -1) if (thx<BUFFER_SIZE) cc_coefs[thx] = cc_temp[thx]; } __device__ void submul_v4(float* combSubMulToCombAvgSub, float* xCorrToCombSubMul, float* autoCorrToCombSubMul, unsigned int win_size) { int i=threadIdx.x; if (i < win_size) combSubMulToCombAvgSub[i] = (xCorrToCombSubMul[i] - autoCorrToCombSubMul[i]) / autoCorrToCombSubMul[i]; } __device__ void avgsub_v4(float* combSubMulToCombAvgSub, float* combAvgSubtoOutBlock, unsigned int win_size) { int thx=threadIdx.x; __shared__ float as_average; if (thx==0) { as_average = 0; for (int i = 0; i < win_size; i++) as_average += combSubMulToCombAvgSub[i]; as_average /= win_size; } __syncthreads(); if (thx < win_size) combAvgSubtoOutBlock[thx] = combSubMulToCombAvgSub[thx] - as_average; } __device__ void out_block_v4(float* reference, unsigned int sn, float* out_buffer, float* combAvgSubtoOutBlock, unsigned int win_size) { unsigned int noz = blockIdx.x; unsigned int thx = threadIdx.x; // output /// output decoding reference[sn * N + noz] = out_buffer[0]; /// next state __shared__ float out_buffer_temp[BUFFER_SIZE]; if (thx<BUFFER_SIZE) out_buffer_temp[thx] = out_buffer[thx+1]; __syncthreads(); if (thx<BUFFER_SIZE) out_buffer[thx] = out_buffer_temp[thx]; out_buffer[BUFFER_SIZE] = 0; unsigned int out_ignore_it = (BUFFER_SIZE - win_size) / 2; if (thx < BUFFER_SIZE - (2 * out_ignore_it)) out_buffer_temp[thx + out_ignore_it] = out_buffer[thx + out_ignore_it] + combAvgSubtoOutBlock[thx]; __syncthreads(); if (thx < BUFFER_SIZE - (2 * out_ignore_it)) out_buffer[thx+ out_ignore_it] = out_buffer_temp[thx+ out_ignore_it]; } __global__ void computeThreadAllocator_v4(float* input_image, aoi* aoi_coordinates, unsigned int image_height, unsigned int image_width, float* image_parts, aoi* g_thread_state ) { unsigned int noz = blockIdx.x; unsigned int thx = threadIdx.x; // state variables /// thread allocator __shared__ aoi thread_state; //// initialization if (thx==0) thread_state = g_thread_state[noz]; __syncthreads(); for (unsigned int sn=0;sn<image_height;sn++) { // thread allocator thread_allocator_v4(input_image, aoi_coordinates, image_width, sn, &thread_state, image_parts); } if (thx==0) g_thread_state[noz] = thread_state; __syncthreads(); } __global__ void computeThread_v4(float* reference, float* parallelCoeffs, int* parallelSW, unsigned int image_height, unsigned int image_width, float* image_parts, float* g_ac_samples, int* g_ac_sw, float* g_cc_coefs, float* g_out_buffer ) { unsigned int noz = blockIdx.x; unsigned int thx = threadIdx.x; // state variables /// auto correlation __shared__ float ac_samples[BUFFER_SIZE + 1]; __shared__ int ac_sw; /// cross correlation __shared__ float cc_coefs[BUFFER_SIZE + 1]; /// output block __shared__ float out_buffer[BUFFER_SIZE + 1]; //// initialization if (thx==0) ac_sw = g_ac_sw[noz]; if (thx<BUFFER_SIZE+1) { ac_samples[thx] = g_ac_samples[noz*(BUFFER_SIZE+1) + thx]; cc_coefs [thx] = g_cc_coefs [noz*(BUFFER_SIZE+1) + thx]; out_buffer[thx] = g_out_buffer[noz*(BUFFER_SIZE+1) + thx]; } // inter-block communication __shared__ int ac_ignore_it; __shared__ float imgPreproc2DDFA; __shared__ float autoCorrToCombSubMul[BUFFER_SIZE]; __shared__ float ac_sampWin[(BUFFER_SIZE*2+1)]; __shared__ float xCorrToCombSubMul[BUFFER_SIZE]; __shared__ float combSubMulToCombAvgSub[BUFFER_SIZE]; __shared__ float combAvgSubtoOutBlock[BUFFER_SIZE]; for (unsigned int sn=0;sn<image_height;sn++) { // for all cuda blocks (nozzles): // pre-process image: inv and reduce imgPreproc2DDFA = 
preproc_image_v4(image_parts+sn*image_width, image_width); // single DDFA /// auto correlation if (thx<BUFFER_SIZE) { autoCorrToCombSubMul[thx] = 0; ac_sampWin[thx] = 0; ac_sampWin[BUFFER_SIZE+thx] = 0; } if (thx==0) { ac_ignore_it = BUFFER_SIZE / 2 - ac_sw; ac_sampWin[2*BUFFER_SIZE] = 0; } __syncthreads(); auto_correlate_v4(imgPreproc2DDFA, parallelSW, sn, ac_samples, &ac_sw, ac_ignore_it, ac_sampWin, autoCorrToCombSubMul); /// cross correlation /// note: we use the ac_samples, ac_ignore_it, ac_sampWin and ac_sw from the auto correlation stage if (thx<BUFFER_SIZE) xCorrToCombSubMul[thx] = 0; __syncthreads(); //// output decoding cross_correlate_v4(ac_ignore_it, &ac_sw, ac_sampWin, cc_coefs, parallelCoeffs, sn, xCorrToCombSubMul); // subtract and multiply ((x-y)/y) submul_v4(combSubMulToCombAvgSub, xCorrToCombSubMul, autoCorrToCombSubMul, ac_sw * 2); // average and subtract avgsub_v4(combSubMulToCombAvgSub, combAvgSubtoOutBlock, ac_sw * 2); // output /// output decoding out_block_v4(reference, sn, out_buffer, combAvgSubtoOutBlock, ac_sw * 2); } // write back the state variables to the global space if (thx==0) g_ac_sw[noz] = ac_sw; if (thx<BUFFER_SIZE+1) { g_ac_samples[noz*(BUFFER_SIZE+1) + thx] = ac_samples[thx]; g_cc_coefs [noz*(BUFFER_SIZE+1) + thx] = cc_coefs [thx]; g_out_buffer[noz*(BUFFER_SIZE+1) + thx] = out_buffer[thx]; } __syncthreads(); } void compute_v4(float* reference, float* input_image, aoi* aoi_coordinates, float* parallelCoeffs, int* parallelSW, unsigned int image_height, unsigned int image_width) { // allocate/transfer data on/to to the device float* d_reference; assert(hipSuccess == hipMalloc((void **) &d_reference, N*CHUNK_LINES * sizeof(float))); float* d_input_image; assert(hipSuccess == hipMalloc((void **) &d_input_image, image_width*CHUNK_LINES * sizeof(float))); aoi* d_aoi_coordinates; assert(hipSuccess == hipMalloc((void **) &d_aoi_coordinates, N*CHUNK_LINES * sizeof(aoi))); float* d_parallelCoeffs; assert(hipSuccess == hipMalloc((void **) &d_parallelCoeffs, BUFFER_SIZE*N*CHUNK_LINES * sizeof(float))); int* d_parallelSW; assert(hipSuccess == hipMalloc((void **) &d_parallelSW, N*CHUNK_LINES * sizeof(int))); float* image_parts; assert(hipSuccess == hipMalloc((void **) &image_parts, image_width*CHUNK_LINES * sizeof(float))); // state variables /// thread allocator aoi* thread_state; assert(hipSuccess == hipMalloc((void **) &thread_state, N * sizeof(aoi))); assert(hipSuccess == hipMemset((void*)thread_state, 0, N * sizeof(aoi))); /// image preprocessor /// auto correlation float* ac_samples; assert(hipSuccess == hipMalloc((void **) &ac_samples, N * (BUFFER_SIZE + 1) * sizeof(float))); assert(hipSuccess == hipMemset((void*)ac_samples, 0, N * (BUFFER_SIZE + 1) * sizeof(float))); int* ac_sw; assert(hipSuccess == hipMalloc((void **) &ac_sw, N * sizeof(int))); assert(hipSuccess == hipMemset((void*)ac_sw, 0, N * sizeof(int))); /// cross correlation float* cc_coefs; assert(hipSuccess == hipMalloc((void **) &cc_coefs, N * (BUFFER_SIZE + 1) * sizeof(float))); assert(hipSuccess == hipMemset((void*)cc_coefs, 0, N * (BUFFER_SIZE + 1) * sizeof(float))); /// output block float* out_buffer; assert(hipSuccess == hipMalloc((void **) &out_buffer, N * (BUFFER_SIZE + 1) * sizeof(float))); assert(hipSuccess == hipMemset((void*)out_buffer, 0, N * (BUFFER_SIZE + 1) * sizeof(float))); // computation for (int i=0;i<image_height;i+=CHUNK_LINES) { // number of lines to be processes in this iteration unsigned int num_lines = min( CHUNK_LINES, image_height-i); hipMemcpy(d_input_image, input_image 
+ i*image_width, image_width*num_lines * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_aoi_coordinates, aoi_coordinates + i*N, N*num_lines * sizeof(aoi), hipMemcpyHostToDevice); hipMemcpy(d_parallelCoeffs, parallelCoeffs + i*BUFFER_SIZE*N, BUFFER_SIZE*N*num_lines * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_parallelSW, parallelSW + i*N, N*num_lines * sizeof(int), hipMemcpyHostToDevice); unsigned int cuda_threads = ((image_width/N)/32+1)*32; hipLaunchKernelGGL(( computeThreadAllocator_v4), dim3(N),dim3(cuda_threads), 0, 0, d_input_image, d_aoi_coordinates, num_lines, image_width, image_parts, thread_state); if ( hipSuccess != hipGetLastError() ) printf( "Error in kernel call!\n" ); cuda_threads = ((BUFFER_SIZE+1)/32+1)*32; hipLaunchKernelGGL(( computeThread_v4), dim3(N),dim3(cuda_threads), 0, 0, d_reference, d_parallelCoeffs, d_parallelSW, num_lines, image_width, image_parts, ac_samples, ac_sw, cc_coefs, out_buffer); if ( hipSuccess != hipGetLastError() ) printf( "Error in kernel call!\n" ); assert(hipSuccess == hipMemcpy(reference + i*N, d_reference, N*num_lines * sizeof(float), hipMemcpyDeviceToHost)); } hipFree(image_parts); hipFree(d_parallelSW); hipFree(d_parallelCoeffs); hipFree(d_aoi_coordinates); hipFree(d_input_image); hipFree(d_reference); }
5f15f95f380e40dd4a0d435af6af24a20af59aeb.cu
/* * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <cuda.h> #include <assert.h> #include <stdio.h> #include "globals.hpp" //////////////////////////////////////////////////////////////////////////////// // export the callable function extern "C" void compute_v4(float* reference, float* input_image, aoi* aoi_coordinates, float* parallelCoeffs, int* parallelSW, unsigned int image_height, unsigned int image_width); __device__ void thread_allocator_v4(float* input_image, aoi* aoi_coordinates, unsigned int image_width, unsigned int sn, aoi* thread_state, float* image_parts) { // thread allocator unsigned int ta = blockIdx.x; unsigned int thx = threadIdx.x; /// output decoding unsigned int offset = ta*(image_width/N); unsigned int local_end = thread_state->end - thread_state->start; if (thx <= local_end) { image_parts[sn*image_width + offset + thx] = input_image[sn*image_width + thread_state->start + thx]; } if (thx == local_end + 1) image_parts[sn*image_width + offset + thx] = -1; // to terminate the image part /// next state __syncthreads(); if (thx==0) if ((aoi_coordinates + sn * N + ta)->start != -1) *thread_state = aoi_coordinates[sn * N + ta]; __syncthreads(); /// output decoding } __device__ float preproc_image_v4(float* image_parts, unsigned int image_width) { unsigned int noz = blockIdx.x; float imgPreproc2DDFA = 0; float* imgpre = image_parts + (image_width / N) * noz; while (*(imgpre) != -1) { imgPreproc2DDFA += WHITE_VALUE - *(imgpre); imgpre++; } return imgPreproc2DDFA; } __device__ void auto_correlate_v4(float imgPreproc2DDFA, int* parallelSW, unsigned int sn, float* ac_samples, int* ac_sw, int ac_ignore_it, float* ac_sampWin, float* autoCorrToCombSubMul) { unsigned int noz = blockIdx.x; unsigned int thx = threadIdx.x; //// output decoding if (thx<2*(*ac_sw)) ac_sampWin[thx] = ac_samples[ac_ignore_it + thx]; if (thx==0) ac_sampWin[2*(*ac_sw)] = ac_samples[ac_ignore_it + 2*(*ac_sw)]; __syncthreads(); if (thx<2* (*ac_sw)) for (int c = 0; c <= (*ac_sw) * 2; c++) { int d = c + (thx-(*ac_sw)) + 1; int k = (thx-(*ac_sw)) + (*ac_sw); if ((d >= 0) && (d < (*ac_sw) * 2)) autoCorrToCombSubMul[(*ac_sw) * 2 - k - 1] += ac_sampWin[c] * ac_sampWin[d]; } __syncthreads(); //// next state int ac_temp = parallelSW[sn * N + noz]; if (ac_temp != -1) (*ac_sw) = ac_temp; __shared__ float ac_samples_temp[BUFFER_SIZE]; if (thx<BUFFER_SIZE) ac_samples_temp[thx] = ac_samples[thx+1]; __syncthreads(); if (thx<BUFFER_SIZE) ac_samples[thx] = ac_samples_temp[thx]; if (thx==0) ac_samples[BUFFER_SIZE] = imgPreproc2DDFA; } __device__ void cross_correlate_v4(int ac_ignore_it, int* ac_sw, float* ac_sampWin, float* cc_coefs, float* parallelCoeffs, unsigned int sn, float* xCorrToCombSubMul) { unsigned int noz = blockIdx.x; unsigned int thx = threadIdx.x; //// output decoding __syncthreads(); if (thx<2*(*ac_sw)) for (int c = 0; c <= (*ac_sw) * 2; c++) { int d = c + (thx-(*ac_sw)) + 1; int k = (thx-(*ac_sw)) + (*ac_sw); if ((d >= 0) && (d < (*ac_sw) * 2)) xCorrToCombSubMul[(*ac_sw) * 2 - k - 1] += ac_sampWin[c] * cc_coefs[ac_ignore_it + d]; } __syncthreads(); //// next state float* cc_temp = parallelCoeffs + sn * N * BUFFER_SIZE + noz * BUFFER_SIZE; if (*cc_temp != -1) if 
(thx<BUFFER_SIZE) cc_coefs[thx] = cc_temp[thx]; } __device__ void submul_v4(float* combSubMulToCombAvgSub, float* xCorrToCombSubMul, float* autoCorrToCombSubMul, unsigned int win_size) { int i=threadIdx.x; if (i < win_size) combSubMulToCombAvgSub[i] = (xCorrToCombSubMul[i] - autoCorrToCombSubMul[i]) / autoCorrToCombSubMul[i]; } __device__ void avgsub_v4(float* combSubMulToCombAvgSub, float* combAvgSubtoOutBlock, unsigned int win_size) { int thx=threadIdx.x; __shared__ float as_average; if (thx==0) { as_average = 0; for (int i = 0; i < win_size; i++) as_average += combSubMulToCombAvgSub[i]; as_average /= win_size; } __syncthreads(); if (thx < win_size) combAvgSubtoOutBlock[thx] = combSubMulToCombAvgSub[thx] - as_average; } __device__ void out_block_v4(float* reference, unsigned int sn, float* out_buffer, float* combAvgSubtoOutBlock, unsigned int win_size) { unsigned int noz = blockIdx.x; unsigned int thx = threadIdx.x; // output /// output decoding reference[sn * N + noz] = out_buffer[0]; /// next state __shared__ float out_buffer_temp[BUFFER_SIZE]; if (thx<BUFFER_SIZE) out_buffer_temp[thx] = out_buffer[thx+1]; __syncthreads(); if (thx<BUFFER_SIZE) out_buffer[thx] = out_buffer_temp[thx]; out_buffer[BUFFER_SIZE] = 0; unsigned int out_ignore_it = (BUFFER_SIZE - win_size) / 2; if (thx < BUFFER_SIZE - (2 * out_ignore_it)) out_buffer_temp[thx + out_ignore_it] = out_buffer[thx + out_ignore_it] + combAvgSubtoOutBlock[thx]; __syncthreads(); if (thx < BUFFER_SIZE - (2 * out_ignore_it)) out_buffer[thx+ out_ignore_it] = out_buffer_temp[thx+ out_ignore_it]; } __global__ void computeThreadAllocator_v4(float* input_image, aoi* aoi_coordinates, unsigned int image_height, unsigned int image_width, float* image_parts, aoi* g_thread_state ) { unsigned int noz = blockIdx.x; unsigned int thx = threadIdx.x; // state variables /// thread allocator __shared__ aoi thread_state; //// initialization if (thx==0) thread_state = g_thread_state[noz]; __syncthreads(); for (unsigned int sn=0;sn<image_height;sn++) { // thread allocator thread_allocator_v4(input_image, aoi_coordinates, image_width, sn, &thread_state, image_parts); } if (thx==0) g_thread_state[noz] = thread_state; __syncthreads(); } __global__ void computeThread_v4(float* reference, float* parallelCoeffs, int* parallelSW, unsigned int image_height, unsigned int image_width, float* image_parts, float* g_ac_samples, int* g_ac_sw, float* g_cc_coefs, float* g_out_buffer ) { unsigned int noz = blockIdx.x; unsigned int thx = threadIdx.x; // state variables /// auto correlation __shared__ float ac_samples[BUFFER_SIZE + 1]; __shared__ int ac_sw; /// cross correlation __shared__ float cc_coefs[BUFFER_SIZE + 1]; /// output block __shared__ float out_buffer[BUFFER_SIZE + 1]; //// initialization if (thx==0) ac_sw = g_ac_sw[noz]; if (thx<BUFFER_SIZE+1) { ac_samples[thx] = g_ac_samples[noz*(BUFFER_SIZE+1) + thx]; cc_coefs [thx] = g_cc_coefs [noz*(BUFFER_SIZE+1) + thx]; out_buffer[thx] = g_out_buffer[noz*(BUFFER_SIZE+1) + thx]; } // inter-block communication __shared__ int ac_ignore_it; __shared__ float imgPreproc2DDFA; __shared__ float autoCorrToCombSubMul[BUFFER_SIZE]; __shared__ float ac_sampWin[(BUFFER_SIZE*2+1)]; __shared__ float xCorrToCombSubMul[BUFFER_SIZE]; __shared__ float combSubMulToCombAvgSub[BUFFER_SIZE]; __shared__ float combAvgSubtoOutBlock[BUFFER_SIZE]; for (unsigned int sn=0;sn<image_height;sn++) { // for all cuda blocks (nozzles): // pre-process image: inv and reduce imgPreproc2DDFA = preproc_image_v4(image_parts+sn*image_width, image_width); // single 
DDFA /// auto correlation if (thx<BUFFER_SIZE) { autoCorrToCombSubMul[thx] = 0; ac_sampWin[thx] = 0; ac_sampWin[BUFFER_SIZE+thx] = 0; } if (thx==0) { ac_ignore_it = BUFFER_SIZE / 2 - ac_sw; ac_sampWin[2*BUFFER_SIZE] = 0; } __syncthreads(); auto_correlate_v4(imgPreproc2DDFA, parallelSW, sn, ac_samples, &ac_sw, ac_ignore_it, ac_sampWin, autoCorrToCombSubMul); /// cross correlation /// note: we use the ac_samples, ac_ignore_it, ac_sampWin and ac_sw from the auto correlation stage if (thx<BUFFER_SIZE) xCorrToCombSubMul[thx] = 0; __syncthreads(); //// output decoding cross_correlate_v4(ac_ignore_it, &ac_sw, ac_sampWin, cc_coefs, parallelCoeffs, sn, xCorrToCombSubMul); // subtract and multiply ((x-y)/y) submul_v4(combSubMulToCombAvgSub, xCorrToCombSubMul, autoCorrToCombSubMul, ac_sw * 2); // average and subtract avgsub_v4(combSubMulToCombAvgSub, combAvgSubtoOutBlock, ac_sw * 2); // output /// output decoding out_block_v4(reference, sn, out_buffer, combAvgSubtoOutBlock, ac_sw * 2); } // write back the state variables to the global space if (thx==0) g_ac_sw[noz] = ac_sw; if (thx<BUFFER_SIZE+1) { g_ac_samples[noz*(BUFFER_SIZE+1) + thx] = ac_samples[thx]; g_cc_coefs [noz*(BUFFER_SIZE+1) + thx] = cc_coefs [thx]; g_out_buffer[noz*(BUFFER_SIZE+1) + thx] = out_buffer[thx]; } __syncthreads(); } void compute_v4(float* reference, float* input_image, aoi* aoi_coordinates, float* parallelCoeffs, int* parallelSW, unsigned int image_height, unsigned int image_width) { // allocate/transfer data on/to to the device float* d_reference; assert(cudaSuccess == cudaMalloc((void **) &d_reference, N*CHUNK_LINES * sizeof(float))); float* d_input_image; assert(cudaSuccess == cudaMalloc((void **) &d_input_image, image_width*CHUNK_LINES * sizeof(float))); aoi* d_aoi_coordinates; assert(cudaSuccess == cudaMalloc((void **) &d_aoi_coordinates, N*CHUNK_LINES * sizeof(aoi))); float* d_parallelCoeffs; assert(cudaSuccess == cudaMalloc((void **) &d_parallelCoeffs, BUFFER_SIZE*N*CHUNK_LINES * sizeof(float))); int* d_parallelSW; assert(cudaSuccess == cudaMalloc((void **) &d_parallelSW, N*CHUNK_LINES * sizeof(int))); float* image_parts; assert(cudaSuccess == cudaMalloc((void **) &image_parts, image_width*CHUNK_LINES * sizeof(float))); // state variables /// thread allocator aoi* thread_state; assert(cudaSuccess == cudaMalloc((void **) &thread_state, N * sizeof(aoi))); assert(cudaSuccess == cudaMemset((void*)thread_state, 0, N * sizeof(aoi))); /// image preprocessor /// auto correlation float* ac_samples; assert(cudaSuccess == cudaMalloc((void **) &ac_samples, N * (BUFFER_SIZE + 1) * sizeof(float))); assert(cudaSuccess == cudaMemset((void*)ac_samples, 0, N * (BUFFER_SIZE + 1) * sizeof(float))); int* ac_sw; assert(cudaSuccess == cudaMalloc((void **) &ac_sw, N * sizeof(int))); assert(cudaSuccess == cudaMemset((void*)ac_sw, 0, N * sizeof(int))); /// cross correlation float* cc_coefs; assert(cudaSuccess == cudaMalloc((void **) &cc_coefs, N * (BUFFER_SIZE + 1) * sizeof(float))); assert(cudaSuccess == cudaMemset((void*)cc_coefs, 0, N * (BUFFER_SIZE + 1) * sizeof(float))); /// output block float* out_buffer; assert(cudaSuccess == cudaMalloc((void **) &out_buffer, N * (BUFFER_SIZE + 1) * sizeof(float))); assert(cudaSuccess == cudaMemset((void*)out_buffer, 0, N * (BUFFER_SIZE + 1) * sizeof(float))); // computation for (int i=0;i<image_height;i+=CHUNK_LINES) { // number of lines to be processes in this iteration unsigned int num_lines = min( CHUNK_LINES, image_height-i); cudaMemcpy(d_input_image, input_image + i*image_width, 
image_width*num_lines * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_aoi_coordinates, aoi_coordinates + i*N, N*num_lines * sizeof(aoi), cudaMemcpyHostToDevice); cudaMemcpy(d_parallelCoeffs, parallelCoeffs + i*BUFFER_SIZE*N, BUFFER_SIZE*N*num_lines * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_parallelSW, parallelSW + i*N, N*num_lines * sizeof(int), cudaMemcpyHostToDevice); unsigned int cuda_threads = ((image_width/N)/32+1)*32; computeThreadAllocator_v4<<<N,cuda_threads>>>(d_input_image, d_aoi_coordinates, num_lines, image_width, image_parts, thread_state); if ( cudaSuccess != cudaGetLastError() ) printf( "Error in kernel call!\n" ); cuda_threads = ((BUFFER_SIZE+1)/32+1)*32; computeThread_v4<<<N,cuda_threads>>>(d_reference, d_parallelCoeffs, d_parallelSW, num_lines, image_width, image_parts, ac_samples, ac_sw, cc_coefs, out_buffer); if ( cudaSuccess != cudaGetLastError() ) printf( "Error in kernel call!\n" ); assert(cudaSuccess == cudaMemcpy(reference + i*N, d_reference, N*num_lines * sizeof(float), cudaMemcpyDeviceToHost)); } cudaFree(image_parts); cudaFree(d_parallelSW); cudaFree(d_parallelCoeffs); cudaFree(d_aoi_coordinates); cudaFree(d_input_image); cudaFree(d_reference); }
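/*
 * Illustrative sketch, not part of the file pair above: the two versions above
 * are the hipify output (.hip) and the original CUDA source (.cu) of the same
 * detector code. The minimal, self-contained CUDA example below shows the API
 * mapping hipify applies; the HIP equivalent of each call is given in the
 * trailing comment. The kernel and buffer names here are invented for the
 * example only.
 */
#include <cuda_runtime.h>
#include <stdio.h>

__global__ void scale_demo(float* v, float a, int n) {              // device code is unchanged under HIP
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) v[i] *= a;
}

int main() {
    const int n = 1 << 10;
    float h[1 << 10];
    for (int i = 0; i < n; i++) h[i] = 1.0f;

    float* d = NULL;
    cudaMalloc(&d, n * sizeof(float));                               // hipMalloc(&d, ...)
    cudaMemcpy(d, h, n * sizeof(float), cudaMemcpyHostToDevice);     // hipMemcpy(..., hipMemcpyHostToDevice)

    scale_demo<<<(n + 255) / 256, 256>>>(d, 2.0f, n);                // hipLaunchKernelGGL((scale_demo), dim3((n+255)/256), dim3(256), 0, 0, d, 2.0f, n)
    if (cudaSuccess != cudaGetLastError())                           // hipSuccess != hipGetLastError()
        printf("Error in kernel call!\n");

    cudaMemcpy(h, d, n * sizeof(float), cudaMemcpyDeviceToHost);     // hipMemcpy(..., hipMemcpyDeviceToHost)
    printf("h[0] = %f\n", h[0]);                                     // expected 2.0
    cudaFree(d);                                                     // hipFree(d)
    return 0;
}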
8cdef9a2de4a6d8d3acab735b4fe634fb48ed90b.hip
// !!! This is a file automatically generated by hipify!!! #include <cassert> #include <cfloat> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include <iostream> #include <stdio.h> #include <list> #include <map> #include <math.h> #include <stdlib.h> #include <vector> #include <set> #include <algorithm> #include <iterator> #include <fstream> #include "../include/common.h" #define K 1 using namespace std; __constant__ int c_row[64516/4]; #define spmv_NBLOCKS 12*8*21 //22 #define spmv_BLOCK_SIZE 256 #define WARP_SIZE 32 texture<float,1,hipReadModeElementType> tex_val; texture<int,1,hipReadModeElementType> tex_col; static const double MAX_RELATIVE_ERROR = .02; static const int PAD_FACTOR = 16; void fill(float *A, const int n, const float maxi) { for (int j = 0; j < n; j++) { A[j] = ((float) maxi * (rand() / (RAND_MAX + 1.0f))); } } void initRandomMatrix(int *cols, int *rowDelimiters, const int n, const int dim) { int nnzAssigned = 0; // Figure out the probability that a nonzero should be assigned to a given // spot in the matrix double prob = (double)n / ((double)dim * (double)dim); // Seed random number generator srand48(2013); // Randomly decide whether entry i,j gets a value, but ensure n values // are assigned bool fillRemaining = false; for (int i = 0; i < dim; i++) { rowDelimiters[i] = nnzAssigned; for (int j = 0; j < dim; j++) { int numEntriesLeft = (dim * dim) - ((i * dim) + j); int needToAssign = n - nnzAssigned; if (numEntriesLeft <= needToAssign) { fillRemaining = true; } if ((nnzAssigned < n && drand48() <= prob) || fillRemaining) { // Assign (i,j) a value cols[nnzAssigned] = j; nnzAssigned++; } } } // Observe the convention to put the number of non zeroes at the end of the // row delimiters array rowDelimiters[dim] = n; assert(nnzAssigned == n); } void convertToPadded(float *A, int *cols, int dim, int *rowDelimiters, float **newA_ptr, int **newcols_ptr, int *newIndices, int *newSize) { // determine total padded size and new row indices int paddedSize = 0; int rowSize; for (int i=0; i<dim; i++) { newIndices[i] = paddedSize; rowSize = rowDelimiters[i+1] - rowDelimiters[i]; if (rowSize % PAD_FACTOR != 0) { rowSize += PAD_FACTOR - rowSize % PAD_FACTOR; } paddedSize += rowSize; } *newSize = paddedSize; newIndices[dim] = paddedSize; hipHostMalloc(newA_ptr, paddedSize * sizeof(float)); hipHostMalloc(newcols_ptr, paddedSize * sizeof(int)); float *newA = *newA_ptr; int *newcols = *newcols_ptr; memset(newA, 0, paddedSize * sizeof(float)); // fill newA and newcols for (int i=0; i<dim; i++) { for (int j=rowDelimiters[i], k=newIndices[i]; j<rowDelimiters[i+1]; j++, k++) { newA[k] = A[j]; newcols[k] = cols[j]; } } } void spmvCpu(const float *val, const int *cols, const int *rowDelimiters, const float *vec, int dim, float *out) { for (int i=0; i<dim; i++) { float t = 0; for (int j = rowDelimiters[i]; j < rowDelimiters[i + 1]; j++) { int col = cols[j]; t += val[j] * vec[col];//tex1Dfetch(tex_vec,col); } out[i] = t; } } void spmv_verifyResults(const float *cpuResults, const float *gpuResults, const int size) { bool passed = true; for (int i = 0; i < size; i++) { if (fabs(cpuResults[i] - gpuResults[i]) / cpuResults[i] > MAX_RELATIVE_ERROR) { cout << "Failed! 
Mismatch at i: "<< i << " ref: " << cpuResults[i] << " dev: " << gpuResults[i] << endl; return; } } cout << "spmv passed" << endl; } __global__ void spmv_kernel(const float* val, const int * cols, const int * rowDelimiters, const float * vec, const int dim, float * out) { // Thread ID in block int t = threadIdx.x; // Thread ID within warp int id = t & (WARP_SIZE-1); int warpsPerBlock = blockDim.x / WARP_SIZE; // One row per warp int myRow = (blockIdx.x * warpsPerBlock) + (t / WARP_SIZE); //__shared__ int rowDeli[spmv_BLOCK_SIZE/WARP_SIZE+1]; __shared__ volatile float partialSums[spmv_BLOCK_SIZE]; //if (threadIdx.x<spmv_BLOCK_SIZE/WARP_SIZE+1) //rowDeli[threadIdx.x]=rowDelimiters[myRow+threadIdx.x]; //__syncthreads(); if (myRow < dim) { int warpStart = c_row[myRow]; int warpEnd = c_row[myRow+1]; float mySum = 0; for (int j = warpStart + id; j < warpEnd; j += WARP_SIZE) { int col = tex1Dfetch(tex_col,j); mySum += tex1Dfetch(tex_val,j) *vec[col]; } partialSums[t] = mySum; // Reduce partial sums if (id < 16) partialSums[t] += partialSums[t+16]; if (id < 8) partialSums[t] += partialSums[t+ 8]; if (id < 4) partialSums[t] += partialSums[t+ 4]; if (id < 2) partialSums[t] += partialSums[t+ 2]; if (id < 1) partialSums[t] += partialSums[t+ 1]; // Write result if (id == 0) { out[myRow] = partialSums[t]; } } } int main(int argc, char **argv) { hipSetDevice(2); srand(2013); float *h_spmv_val, *h_spmv_valPad; int *h_spmv_cols, *h_spmv_colsPad; int *h_rowDelimiters, *h_rowDelimitersPad; float *h_spmv_vec, *h_spmv_out, *spmv_refOut; int spmv_nItems, nItemsPadded, spmv_numRows; spmv_numRows = spmv_NBLOCKS * (spmv_BLOCK_SIZE/WARP_SIZE); spmv_nItems = spmv_numRows * spmv_numRows / 50; // 1% of entries will be non-zero float maxval = 200.0; hipHostMalloc(&h_spmv_val, spmv_nItems * sizeof(float)); hipHostMalloc(&h_spmv_cols, spmv_nItems * sizeof(int)); hipHostMalloc(&h_rowDelimiters, (spmv_numRows + 1) * sizeof(int)); fill(h_spmv_val, spmv_nItems, maxval); initRandomMatrix(h_spmv_cols, h_rowDelimiters, spmv_nItems, spmv_numRows); // Set up remaining host data int paddedSize = spmv_numRows + (PAD_FACTOR - spmv_numRows % PAD_FACTOR); hipHostMalloc(&h_spmv_vec, spmv_numRows * sizeof(float)) ; spmv_refOut = new float[spmv_numRows]; hipHostMalloc(&h_rowDelimitersPad, (spmv_numRows + 1) * sizeof(int)); fill(h_spmv_vec, spmv_numRows, maxval); hipHostMalloc(&h_spmv_out, paddedSize * sizeof(float)); convertToPadded(h_spmv_val, h_spmv_cols, spmv_numRows, h_rowDelimiters, &h_spmv_valPad, &h_spmv_colsPad, h_rowDelimitersPad, &nItemsPadded); // Compute reference solution spmvCpu(h_spmv_val, h_spmv_cols, h_rowDelimiters, h_spmv_vec, spmv_numRows, spmv_refOut); float *d_spmv_val, *d_spmv_vec, *d_spmv_out; int *d_spmv_cols, *d_rowDelimiters; // Allocate device memory hipMalloc(&d_spmv_val, spmv_nItems * sizeof(float)); hipMalloc(&d_spmv_cols, spmv_nItems * sizeof(int)); hipMalloc(&d_spmv_vec, spmv_numRows * sizeof(float)); hipMalloc(&d_spmv_out, spmv_numRows * sizeof(float)); hipMalloc(&d_rowDelimiters, (spmv_numRows+1) * sizeof(int)); // Transfer data to device hipMemcpy(d_spmv_val, h_spmv_val, spmv_nItems * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_spmv_cols, h_spmv_cols, spmv_nItems * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_spmv_vec, h_spmv_vec, spmv_numRows * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_rowDelimiters, h_rowDelimiters, (spmv_numRows+1) * sizeof(int), hipMemcpyHostToDevice); hipMemcpyToSymbol(c_row,h_rowDelimiters,(spmv_numRows+1)*sizeof(int)); // 
hipBindTexture(0,tex_vec,d_spmv_vec,spmv_numRows * sizeof(float)); hipBindTexture(0,tex_val,d_spmv_val,spmv_nItems * sizeof(float)); hipBindTexture(0,tex_col,d_spmv_cols,spmv_nItems * sizeof(int)); hipEvent_t kernel_start, kernel_stop; hipEventCreate(&kernel_start); hipEventCreate(&kernel_stop); float kernel_time = 0.0f; hipEventRecord(kernel_start, 0); // Setup thread configuration int spmv_grid = (int) ceil(spmv_numRows / (float)(spmv_BLOCK_SIZE / WARP_SIZE)); hipLaunchKernelGGL(( spmv_kernel) , dim3(spmv_grid), dim3(spmv_BLOCK_SIZE), 0, 0, d_spmv_val, d_spmv_cols, d_rowDelimiters, d_spmv_vec, spmv_numRows, d_spmv_out); hipDeviceSynchronize(); hipEventRecord(kernel_stop, 0); hipEventSynchronize(kernel_stop); // get elapsed time kernel_time = 0.0f; hipEventElapsedTime(&kernel_time, kernel_start, kernel_stop); kernel_time *= 1.e-3; // Convert to seconds cout << "kernel exe time: " << kernel_time << endl; hipMemcpy(h_spmv_out, d_spmv_out, spmv_numRows * sizeof(float), hipMemcpyDeviceToHost); spmv_verifyResults(spmv_refOut, h_spmv_out, spmv_numRows); return 0; }
8cdef9a2de4a6d8d3acab735b4fe634fb48ed90b.cu
#include <cassert> #include <cfloat> #include <cuda_runtime_api.h> #include <cuda.h> #include <iostream> #include <stdio.h> #include <list> #include <map> #include <math.h> #include <stdlib.h> #include <vector> #include <set> #include <algorithm> #include <iterator> #include <fstream> #include "../include/common.h" #define K 1 using namespace std; __constant__ int c_row[64516/4]; #define spmv_NBLOCKS 12*8*21 //22 #define spmv_BLOCK_SIZE 256 #define WARP_SIZE 32 texture<float,1,cudaReadModeElementType> tex_val; texture<int,1,cudaReadModeElementType> tex_col; static const double MAX_RELATIVE_ERROR = .02; static const int PAD_FACTOR = 16; void fill(float *A, const int n, const float maxi) { for (int j = 0; j < n; j++) { A[j] = ((float) maxi * (rand() / (RAND_MAX + 1.0f))); } } void initRandomMatrix(int *cols, int *rowDelimiters, const int n, const int dim) { int nnzAssigned = 0; // Figure out the probability that a nonzero should be assigned to a given // spot in the matrix double prob = (double)n / ((double)dim * (double)dim); // Seed random number generator srand48(2013); // Randomly decide whether entry i,j gets a value, but ensure n values // are assigned bool fillRemaining = false; for (int i = 0; i < dim; i++) { rowDelimiters[i] = nnzAssigned; for (int j = 0; j < dim; j++) { int numEntriesLeft = (dim * dim) - ((i * dim) + j); int needToAssign = n - nnzAssigned; if (numEntriesLeft <= needToAssign) { fillRemaining = true; } if ((nnzAssigned < n && drand48() <= prob) || fillRemaining) { // Assign (i,j) a value cols[nnzAssigned] = j; nnzAssigned++; } } } // Observe the convention to put the number of non zeroes at the end of the // row delimiters array rowDelimiters[dim] = n; assert(nnzAssigned == n); } void convertToPadded(float *A, int *cols, int dim, int *rowDelimiters, float **newA_ptr, int **newcols_ptr, int *newIndices, int *newSize) { // determine total padded size and new row indices int paddedSize = 0; int rowSize; for (int i=0; i<dim; i++) { newIndices[i] = paddedSize; rowSize = rowDelimiters[i+1] - rowDelimiters[i]; if (rowSize % PAD_FACTOR != 0) { rowSize += PAD_FACTOR - rowSize % PAD_FACTOR; } paddedSize += rowSize; } *newSize = paddedSize; newIndices[dim] = paddedSize; cudaMallocHost(newA_ptr, paddedSize * sizeof(float)); cudaMallocHost(newcols_ptr, paddedSize * sizeof(int)); float *newA = *newA_ptr; int *newcols = *newcols_ptr; memset(newA, 0, paddedSize * sizeof(float)); // fill newA and newcols for (int i=0; i<dim; i++) { for (int j=rowDelimiters[i], k=newIndices[i]; j<rowDelimiters[i+1]; j++, k++) { newA[k] = A[j]; newcols[k] = cols[j]; } } } void spmvCpu(const float *val, const int *cols, const int *rowDelimiters, const float *vec, int dim, float *out) { for (int i=0; i<dim; i++) { float t = 0; for (int j = rowDelimiters[i]; j < rowDelimiters[i + 1]; j++) { int col = cols[j]; t += val[j] * vec[col];//tex1Dfetch(tex_vec,col); } out[i] = t; } } void spmv_verifyResults(const float *cpuResults, const float *gpuResults, const int size) { bool passed = true; for (int i = 0; i < size; i++) { if (fabs(cpuResults[i] - gpuResults[i]) / cpuResults[i] > MAX_RELATIVE_ERROR) { cout << "Failed! 
Mismatch at i: "<< i << " ref: " << cpuResults[i] << " dev: " << gpuResults[i] << endl; return; } } cout << "spmv passed" << endl; } __global__ void spmv_kernel(const float* val, const int * cols, const int * rowDelimiters, const float * vec, const int dim, float * out) { // Thread ID in block int t = threadIdx.x; // Thread ID within warp int id = t & (WARP_SIZE-1); int warpsPerBlock = blockDim.x / WARP_SIZE; // One row per warp int myRow = (blockIdx.x * warpsPerBlock) + (t / WARP_SIZE); //__shared__ int rowDeli[spmv_BLOCK_SIZE/WARP_SIZE+1]; __shared__ volatile float partialSums[spmv_BLOCK_SIZE]; //if (threadIdx.x<spmv_BLOCK_SIZE/WARP_SIZE+1) //rowDeli[threadIdx.x]=rowDelimiters[myRow+threadIdx.x]; //__syncthreads(); if (myRow < dim) { int warpStart = c_row[myRow]; int warpEnd = c_row[myRow+1]; float mySum = 0; for (int j = warpStart + id; j < warpEnd; j += WARP_SIZE) { int col = tex1Dfetch(tex_col,j); mySum += tex1Dfetch(tex_val,j) *vec[col]; } partialSums[t] = mySum; // Reduce partial sums if (id < 16) partialSums[t] += partialSums[t+16]; if (id < 8) partialSums[t] += partialSums[t+ 8]; if (id < 4) partialSums[t] += partialSums[t+ 4]; if (id < 2) partialSums[t] += partialSums[t+ 2]; if (id < 1) partialSums[t] += partialSums[t+ 1]; // Write result if (id == 0) { out[myRow] = partialSums[t]; } } } int main(int argc, char **argv) { cudaSetDevice(2); srand(2013); float *h_spmv_val, *h_spmv_valPad; int *h_spmv_cols, *h_spmv_colsPad; int *h_rowDelimiters, *h_rowDelimitersPad; float *h_spmv_vec, *h_spmv_out, *spmv_refOut; int spmv_nItems, nItemsPadded, spmv_numRows; spmv_numRows = spmv_NBLOCKS * (spmv_BLOCK_SIZE/WARP_SIZE); spmv_nItems = spmv_numRows * spmv_numRows / 50; // 1% of entries will be non-zero float maxval = 200.0; cudaMallocHost(&h_spmv_val, spmv_nItems * sizeof(float)); cudaMallocHost(&h_spmv_cols, spmv_nItems * sizeof(int)); cudaMallocHost(&h_rowDelimiters, (spmv_numRows + 1) * sizeof(int)); fill(h_spmv_val, spmv_nItems, maxval); initRandomMatrix(h_spmv_cols, h_rowDelimiters, spmv_nItems, spmv_numRows); // Set up remaining host data int paddedSize = spmv_numRows + (PAD_FACTOR - spmv_numRows % PAD_FACTOR); cudaMallocHost(&h_spmv_vec, spmv_numRows * sizeof(float)) ; spmv_refOut = new float[spmv_numRows]; cudaMallocHost(&h_rowDelimitersPad, (spmv_numRows + 1) * sizeof(int)); fill(h_spmv_vec, spmv_numRows, maxval); cudaMallocHost(&h_spmv_out, paddedSize * sizeof(float)); convertToPadded(h_spmv_val, h_spmv_cols, spmv_numRows, h_rowDelimiters, &h_spmv_valPad, &h_spmv_colsPad, h_rowDelimitersPad, &nItemsPadded); // Compute reference solution spmvCpu(h_spmv_val, h_spmv_cols, h_rowDelimiters, h_spmv_vec, spmv_numRows, spmv_refOut); float *d_spmv_val, *d_spmv_vec, *d_spmv_out; int *d_spmv_cols, *d_rowDelimiters; // Allocate device memory cudaMalloc(&d_spmv_val, spmv_nItems * sizeof(float)); cudaMalloc(&d_spmv_cols, spmv_nItems * sizeof(int)); cudaMalloc(&d_spmv_vec, spmv_numRows * sizeof(float)); cudaMalloc(&d_spmv_out, spmv_numRows * sizeof(float)); cudaMalloc(&d_rowDelimiters, (spmv_numRows+1) * sizeof(int)); // Transfer data to device cudaMemcpy(d_spmv_val, h_spmv_val, spmv_nItems * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_spmv_cols, h_spmv_cols, spmv_nItems * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_spmv_vec, h_spmv_vec, spmv_numRows * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_rowDelimiters, h_rowDelimiters, (spmv_numRows+1) * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpyToSymbol(c_row,h_rowDelimiters,(spmv_numRows+1)*sizeof(int)); // 
cudaBindTexture(0,tex_vec,d_spmv_vec,spmv_numRows * sizeof(float)); cudaBindTexture(0,tex_val,d_spmv_val,spmv_nItems * sizeof(float)); cudaBindTexture(0,tex_col,d_spmv_cols,spmv_nItems * sizeof(int)); cudaEvent_t kernel_start, kernel_stop; cudaEventCreate(&kernel_start); cudaEventCreate(&kernel_stop); float kernel_time = 0.0f; cudaEventRecord(kernel_start, 0); // Setup thread configuration int spmv_grid = (int) ceil(spmv_numRows / (float)(spmv_BLOCK_SIZE / WARP_SIZE)); spmv_kernel <<<spmv_grid, spmv_BLOCK_SIZE>>> (d_spmv_val, d_spmv_cols, d_rowDelimiters, d_spmv_vec, spmv_numRows, d_spmv_out); cudaDeviceSynchronize(); cudaEventRecord(kernel_stop, 0); cudaEventSynchronize(kernel_stop); // get elapsed time kernel_time = 0.0f; cudaEventElapsedTime(&kernel_time, kernel_start, kernel_stop); kernel_time *= 1.e-3; // Convert to seconds cout << "kernel exe time: " << kernel_time << endl; cudaMemcpy(h_spmv_out, d_spmv_out, spmv_numRows * sizeof(float), cudaMemcpyDeviceToHost); spmv_verifyResults(spmv_refOut, h_spmv_out, spmv_numRows); return 0; }
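/*
 * Illustrative sketch, not part of the benchmark above: spmv_kernel assigns one
 * warp per CSR row, reads the row pointers from __constant__ memory and the
 * values/columns through textures, and reduces per-lane partial sums in
 * volatile shared memory. The standalone kernel below shows the same
 * one-warp-per-row pattern without those optimizations, using a warp shuffle
 * for the reduction instead; all names and the tiny test matrix are chosen for
 * the example only. Block size is assumed to be a multiple of 32.
 */
#include <cuda_runtime.h>
#include <stdio.h>

#define DEMO_WARP 32

__global__ void csr_spmv_warp(const float* val, const int* colIdx, const int* rowPtr,
                              const float* x, int nRows, float* y) {
    int lane = threadIdx.x & (DEMO_WARP - 1);
    int warpsPerBlock = blockDim.x / DEMO_WARP;
    int row = blockIdx.x * warpsPerBlock + threadIdx.x / DEMO_WARP;
    if (row >= nRows) return;                       // row is uniform across the warp

    // Each lane accumulates a strided slice of its row.
    float sum = 0.0f;
    for (int j = rowPtr[row] + lane; j < rowPtr[row + 1]; j += DEMO_WARP)
        sum += val[j] * x[colIdx[j]];

    // Warp-level tree reduction; replaces the volatile shared-memory reduction
    // used in spmv_kernel above.
    for (int offset = DEMO_WARP / 2; offset > 0; offset >>= 1)
        sum += __shfl_down_sync(0xffffffff, sum, offset);

    if (lane == 0) y[row] = sum;
}

int main() {
    // Tiny 2x4 CSR matrix (made-up values): row 0 = {1,2,3}, row 1 = {4}.
    const int nRows = 2;
    float hVal[4] = {1.f, 2.f, 3.f, 4.f};
    int   hCol[4] = {0, 1, 3, 2};
    int   hPtr[3] = {0, 3, 4};
    float hX[4]   = {1.f, 1.f, 1.f, 1.f};
    float hY[2]   = {0.f, 0.f};

    float *dVal, *dX, *dY; int *dCol, *dPtr;
    cudaMalloc(&dVal, sizeof(hVal)); cudaMalloc(&dCol, sizeof(hCol));
    cudaMalloc(&dPtr, sizeof(hPtr)); cudaMalloc(&dX, sizeof(hX)); cudaMalloc(&dY, sizeof(hY));
    cudaMemcpy(dVal, hVal, sizeof(hVal), cudaMemcpyHostToDevice);
    cudaMemcpy(dCol, hCol, sizeof(hCol), cudaMemcpyHostToDevice);
    cudaMemcpy(dPtr, hPtr, sizeof(hPtr), cudaMemcpyHostToDevice);
    cudaMemcpy(dX, hX, sizeof(hX), cudaMemcpyHostToDevice);

    // One block of 64 threads = 2 warps = 2 rows.
    csr_spmv_warp<<<1, 64>>>(dVal, dCol, dPtr, dX, nRows, dY);
    cudaMemcpy(hY, dY, sizeof(hY), cudaMemcpyDeviceToHost);
    printf("y = [%f, %f]\n", hY[0], hY[1]);         // expected [6, 4]

    cudaFree(dVal); cudaFree(dCol); cudaFree(dPtr); cudaFree(dX); cudaFree(dY);
    return 0;
}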
94040597c5c029712ee18c674085a2758140bdd8.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2020-2022 XGBoost contributors */ #include <algorithm> #include <memory> #include <type_traits> #include "../common/hist_util.cuh" #include "device_adapter_hip.cuh" #include "ellpack_page.cuh" #include "iterative_dmatrix.h" #include "proxy_dmatrix.cuh" #include "proxy_dmatrix.h" #include "simple_batch_iterator.h" #include "sparse_page_source.h" namespace xgboost { namespace data { void IterativeDMatrix::InitFromCUDA(DataIterHandle iter_handle, float missing, std::shared_ptr<DMatrix> ref) { // A handle passed to external iterator. DMatrixProxy* proxy = MakeProxy(proxy_); CHECK(proxy); // The external iterator auto iter = DataIterProxy<DataIterResetCallback, XGDMatrixCallbackNext>{iter_handle, reset_, next_}; dh::XGBCachingDeviceAllocator<char> alloc; auto num_rows = [&]() { return Dispatch(proxy, [](auto const& value) { return value.NumRows(); }); }; auto num_cols = [&]() { return Dispatch(proxy, [](auto const& value) { return value.NumCols(); }); }; size_t row_stride = 0; size_t nnz = 0; // Sketch for all batches. std::vector<common::SketchContainer> sketch_containers; size_t batches = 0; size_t accumulated_rows = 0; bst_feature_t cols = 0; int32_t current_device; dh::safe_cuda(hipGetDevice(&current_device)); auto get_device = [&]() -> int32_t { int32_t d = (ctx_.gpu_id == Context::kCpuId) ? current_device : ctx_.gpu_id; CHECK_NE(d, Context::kCpuId); return d; }; /** * Generate quantiles */ common::HistogramCuts cuts; do { // We use do while here as the first batch is fetched in ctor ctx_.gpu_id = proxy->DeviceIdx(); CHECK_LT(ctx_.gpu_id, common::AllVisibleGPUs()); dh::safe_cuda(hipSetDevice(get_device())); if (cols == 0) { cols = num_cols(); rabit::Allreduce<rabit::op::Max>(&cols, 1); this->info_.num_col_ = cols; } else { CHECK_EQ(cols, num_cols()) << "Inconsistent number of columns."; } if (!ref) { sketch_containers.emplace_back(proxy->Info().feature_types, batch_param_.max_bin, cols, num_rows(), get_device()); auto* p_sketch = &sketch_containers.back(); proxy->Info().weights_.SetDevice(get_device()); Dispatch(proxy, [&](auto const& value) { common::AdapterDeviceSketch(value, batch_param_.max_bin, proxy->Info(), missing, p_sketch); }); } auto batch_rows = num_rows(); accumulated_rows += batch_rows; dh::caching_device_vector<size_t> row_counts(batch_rows + 1, 0); common::Span<size_t> row_counts_span(row_counts.data().get(), row_counts.size()); row_stride = ::max(row_stride, Dispatch(proxy, [=](auto const& value) { return GetRowCounts(value, row_counts_span, get_device(), missing); })); nnz += thrust::reduce(thrust::hip::par(alloc), row_counts.begin(), row_counts.end()); batches++; } while (iter.Next()); iter.Reset(); dh::safe_cuda(hipSetDevice(get_device())); if (!ref) { HostDeviceVector<FeatureType> ft; common::SketchContainer final_sketch( sketch_containers.empty() ? ft : sketch_containers.front().FeatureTypes(), batch_param_.max_bin, cols, accumulated_rows, get_device()); for (auto const& sketch : sketch_containers) { final_sketch.Merge(sketch.ColumnsPtr(), sketch.Data()); final_sketch.FixError(); } sketch_containers.clear(); sketch_containers.shrink_to_fit(); final_sketch.MakeCuts(&cuts); } else { GetCutsFromRef(ref, Info().num_col_, batch_param_, &cuts); } this->info_.num_row_ = accumulated_rows; this->info_.num_nonzero_ = nnz; auto init_page = [this, &proxy, &cuts, row_stride, accumulated_rows, get_device]() { if (!ellpack_) { // Should be put inside the while loop to protect against empty batch. 
In // that case device id is invalid. ellpack_.reset(new EllpackPage); *(ellpack_->Impl()) = EllpackPageImpl(get_device(), cuts, this->IsDense(), row_stride, accumulated_rows); } }; /** * Generate gradient index. */ size_t offset = 0; iter.Reset(); size_t n_batches_for_verification = 0; while (iter.Next()) { init_page(); dh::safe_cuda(hipSetDevice(get_device())); auto rows = num_rows(); dh::caching_device_vector<size_t> row_counts(rows + 1, 0); common::Span<size_t> row_counts_span(row_counts.data().get(), row_counts.size()); Dispatch(proxy, [=](auto const& value) { return GetRowCounts(value, row_counts_span, get_device(), missing); }); auto is_dense = this->IsDense(); proxy->Info().feature_types.SetDevice(get_device()); auto d_feature_types = proxy->Info().feature_types.ConstDeviceSpan(); auto new_impl = Dispatch(proxy, [&](auto const& value) { return EllpackPageImpl(value, missing, get_device(), is_dense, row_counts_span, d_feature_types, row_stride, rows, cuts); }); size_t num_elements = ellpack_->Impl()->Copy(get_device(), &new_impl, offset); offset += num_elements; proxy->Info().num_row_ = num_rows(); proxy->Info().num_col_ = cols; if (batches != 1) { this->info_.Extend(std::move(proxy->Info()), false, true); } n_batches_for_verification++; } CHECK_EQ(batches, n_batches_for_verification) << "Different number of batches returned between 2 iterations"; if (batches == 1) { this->info_ = std::move(proxy->Info()); this->info_.num_nonzero_ = nnz; CHECK_EQ(proxy->Info().labels.Size(), 0); } iter.Reset(); // Synchronise worker columns rabit::Allreduce<rabit::op::Max>(&info_.num_col_, 1); } BatchSet<EllpackPage> IterativeDMatrix::GetEllpackBatches(BatchParam const& param) { CheckParam(param); if (!ellpack_ && !ghist_) { LOG(FATAL) << "`QuantileDMatrix` not initialized."; } if (!ellpack_ && ghist_) { ellpack_.reset(new EllpackPage()); // Evaluation QuantileDMatrix initialized from CPU data might not have the correct GPU // ID. if (this->ctx_.IsCPU()) { this->ctx_.gpu_id = param.gpu_id; } if (this->ctx_.IsCPU()) { this->ctx_.gpu_id = dh::CurrentDevice(); } this->Info().feature_types.SetDevice(this->ctx_.gpu_id); *ellpack_->Impl() = EllpackPageImpl(&ctx_, *this->ghist_, this->Info().feature_types.ConstDeviceSpan()); } CHECK(ellpack_); auto begin_iter = BatchIterator<EllpackPage>(new SimpleBatchIteratorImpl<EllpackPage>(ellpack_)); return BatchSet<EllpackPage>(begin_iter); } void GetCutsFromEllpack(EllpackPage const& page, common::HistogramCuts* cuts) { *cuts = page.Impl()->Cuts(); } } // namespace data } // namespace xgboost
94040597c5c029712ee18c674085a2758140bdd8.cu
/*! * Copyright 2020-2022 XGBoost contributors */ #include <algorithm> #include <memory> #include <type_traits> #include "../common/hist_util.cuh" #include "device_adapter.cuh" #include "ellpack_page.cuh" #include "iterative_dmatrix.h" #include "proxy_dmatrix.cuh" #include "proxy_dmatrix.h" #include "simple_batch_iterator.h" #include "sparse_page_source.h" namespace xgboost { namespace data { void IterativeDMatrix::InitFromCUDA(DataIterHandle iter_handle, float missing, std::shared_ptr<DMatrix> ref) { // A handle passed to external iterator. DMatrixProxy* proxy = MakeProxy(proxy_); CHECK(proxy); // The external iterator auto iter = DataIterProxy<DataIterResetCallback, XGDMatrixCallbackNext>{iter_handle, reset_, next_}; dh::XGBCachingDeviceAllocator<char> alloc; auto num_rows = [&]() { return Dispatch(proxy, [](auto const& value) { return value.NumRows(); }); }; auto num_cols = [&]() { return Dispatch(proxy, [](auto const& value) { return value.NumCols(); }); }; size_t row_stride = 0; size_t nnz = 0; // Sketch for all batches. std::vector<common::SketchContainer> sketch_containers; size_t batches = 0; size_t accumulated_rows = 0; bst_feature_t cols = 0; int32_t current_device; dh::safe_cuda(cudaGetDevice(&current_device)); auto get_device = [&]() -> int32_t { int32_t d = (ctx_.gpu_id == Context::kCpuId) ? current_device : ctx_.gpu_id; CHECK_NE(d, Context::kCpuId); return d; }; /** * Generate quantiles */ common::HistogramCuts cuts; do { // We use do while here as the first batch is fetched in ctor ctx_.gpu_id = proxy->DeviceIdx(); CHECK_LT(ctx_.gpu_id, common::AllVisibleGPUs()); dh::safe_cuda(cudaSetDevice(get_device())); if (cols == 0) { cols = num_cols(); rabit::Allreduce<rabit::op::Max>(&cols, 1); this->info_.num_col_ = cols; } else { CHECK_EQ(cols, num_cols()) << "Inconsistent number of columns."; } if (!ref) { sketch_containers.emplace_back(proxy->Info().feature_types, batch_param_.max_bin, cols, num_rows(), get_device()); auto* p_sketch = &sketch_containers.back(); proxy->Info().weights_.SetDevice(get_device()); Dispatch(proxy, [&](auto const& value) { common::AdapterDeviceSketch(value, batch_param_.max_bin, proxy->Info(), missing, p_sketch); }); } auto batch_rows = num_rows(); accumulated_rows += batch_rows; dh::caching_device_vector<size_t> row_counts(batch_rows + 1, 0); common::Span<size_t> row_counts_span(row_counts.data().get(), row_counts.size()); row_stride = std::max(row_stride, Dispatch(proxy, [=](auto const& value) { return GetRowCounts(value, row_counts_span, get_device(), missing); })); nnz += thrust::reduce(thrust::cuda::par(alloc), row_counts.begin(), row_counts.end()); batches++; } while (iter.Next()); iter.Reset(); dh::safe_cuda(cudaSetDevice(get_device())); if (!ref) { HostDeviceVector<FeatureType> ft; common::SketchContainer final_sketch( sketch_containers.empty() ? ft : sketch_containers.front().FeatureTypes(), batch_param_.max_bin, cols, accumulated_rows, get_device()); for (auto const& sketch : sketch_containers) { final_sketch.Merge(sketch.ColumnsPtr(), sketch.Data()); final_sketch.FixError(); } sketch_containers.clear(); sketch_containers.shrink_to_fit(); final_sketch.MakeCuts(&cuts); } else { GetCutsFromRef(ref, Info().num_col_, batch_param_, &cuts); } this->info_.num_row_ = accumulated_rows; this->info_.num_nonzero_ = nnz; auto init_page = [this, &proxy, &cuts, row_stride, accumulated_rows, get_device]() { if (!ellpack_) { // Should be put inside the while loop to protect against empty batch. In // that case device id is invalid. 
ellpack_.reset(new EllpackPage); *(ellpack_->Impl()) = EllpackPageImpl(get_device(), cuts, this->IsDense(), row_stride, accumulated_rows); } }; /** * Generate gradient index. */ size_t offset = 0; iter.Reset(); size_t n_batches_for_verification = 0; while (iter.Next()) { init_page(); dh::safe_cuda(cudaSetDevice(get_device())); auto rows = num_rows(); dh::caching_device_vector<size_t> row_counts(rows + 1, 0); common::Span<size_t> row_counts_span(row_counts.data().get(), row_counts.size()); Dispatch(proxy, [=](auto const& value) { return GetRowCounts(value, row_counts_span, get_device(), missing); }); auto is_dense = this->IsDense(); proxy->Info().feature_types.SetDevice(get_device()); auto d_feature_types = proxy->Info().feature_types.ConstDeviceSpan(); auto new_impl = Dispatch(proxy, [&](auto const& value) { return EllpackPageImpl(value, missing, get_device(), is_dense, row_counts_span, d_feature_types, row_stride, rows, cuts); }); size_t num_elements = ellpack_->Impl()->Copy(get_device(), &new_impl, offset); offset += num_elements; proxy->Info().num_row_ = num_rows(); proxy->Info().num_col_ = cols; if (batches != 1) { this->info_.Extend(std::move(proxy->Info()), false, true); } n_batches_for_verification++; } CHECK_EQ(batches, n_batches_for_verification) << "Different number of batches returned between 2 iterations"; if (batches == 1) { this->info_ = std::move(proxy->Info()); this->info_.num_nonzero_ = nnz; CHECK_EQ(proxy->Info().labels.Size(), 0); } iter.Reset(); // Synchronise worker columns rabit::Allreduce<rabit::op::Max>(&info_.num_col_, 1); } BatchSet<EllpackPage> IterativeDMatrix::GetEllpackBatches(BatchParam const& param) { CheckParam(param); if (!ellpack_ && !ghist_) { LOG(FATAL) << "`QuantileDMatrix` not initialized."; } if (!ellpack_ && ghist_) { ellpack_.reset(new EllpackPage()); // Evaluation QuantileDMatrix initialized from CPU data might not have the correct GPU // ID. if (this->ctx_.IsCPU()) { this->ctx_.gpu_id = param.gpu_id; } if (this->ctx_.IsCPU()) { this->ctx_.gpu_id = dh::CurrentDevice(); } this->Info().feature_types.SetDevice(this->ctx_.gpu_id); *ellpack_->Impl() = EllpackPageImpl(&ctx_, *this->ghist_, this->Info().feature_types.ConstDeviceSpan()); } CHECK(ellpack_); auto begin_iter = BatchIterator<EllpackPage>(new SimpleBatchIteratorImpl<EllpackPage>(ellpack_)); return BatchSet<EllpackPage>(begin_iter); } void GetCutsFromEllpack(EllpackPage const& page, common::HistogramCuts* cuts) { *cuts = page.Impl()->Cuts(); } } // namespace data } // namespace xgboost
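/*
 * Illustrative sketch, independent of the XGBoost internals above: during the
 * first pass over the external iterator, InitFromCUDA accumulates nnz by
 * summing the per-row element counts on the device with thrust::reduce. The
 * standalone example below shows that reduction step in isolation (minus the
 * caching-allocator execution policy); the row counts are made up.
 */
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <cstdio>

int main() {
    // Pretend these are the per-row non-missing counts produced by GetRowCounts.
    size_t h_counts[5] = {3, 0, 7, 2, 1};
    thrust::device_vector<size_t> row_counts(h_counts, h_counts + 5);

    size_t nnz = thrust::reduce(row_counts.begin(), row_counts.end(), (size_t)0);
    printf("nnz = %zu\n", nnz);   // expected 13
    return 0;
}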
9f93f79f7d6d7ea41b4dd1ddcdadd17aabc9cc05.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>

void init(float *x, int s){
    int i=0;
    for(i=0; i<s; i++){
        x[i] = 1.0f * (float)i;
    }
}

__global__ void compute(float *x, float *y, int s){
    int index = threadIdx.x;
    int stride = blockDim.x;
    int i=0;
    for(i=index; i<s; i+=stride){
        y[i] = x[i]*x[i];
    }
}

int main(){
    int N = 1<<27;
    float *x;// = malloc(sizeof(float)*N);
    float *y;// = malloc(sizeof(float)*N);
    hipMallocManaged(&x, sizeof(float)*N);
    hipMallocManaged(&y, sizeof(float)*N);
    init(x, N);
    init(y, N);
    int i=0;
    while(1){
        hipLaunchKernelGGL(( compute), dim3(2),dim3(1024), 0, 0, x, y, N);
    }
    hipDeviceSynchronize();
    for(i=0; i<N; i++){
        // printf("%d %f %f\n", i, x[i], y[i]);
    }
}
9f93f79f7d6d7ea41b4dd1ddcdadd17aabc9cc05.cu
#include <stdio.h>
#include <stdlib.h>

void init(float *x, int s){
    int i=0;
    for(i=0; i<s; i++){
        x[i] = 1.0f * (float)i;
    }
}

__global__ void compute(float *x, float *y, int s){
    int index = threadIdx.x;
    int stride = blockDim.x;
    int i=0;
    for(i=index; i<s; i+=stride){
        y[i] = x[i]*x[i];
    }
}

int main(){
    int N = 1<<27;
    float *x;// = malloc(sizeof(float)*N);
    float *y;// = malloc(sizeof(float)*N);
    cudaMallocManaged(&x, sizeof(float)*N);
    cudaMallocManaged(&y, sizeof(float)*N);
    init(x, N);
    init(y, N);
    int i=0;
    while(1){
        compute<<<2,1024>>>(x, y, N);
    }
    cudaDeviceSynchronize();
    for(i=0; i<N; i++){
        // printf("%d %f %f\n", i, x[i], y[i]);
    }
}
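/*
 * Illustrative sketch, not a replacement for the stress loop above: that kernel
 * strides by blockDim.x only, so with a <<<2,1024>>> launch both blocks walk
 * the same elements (apparently harmless for what looks like a while(1) GPU
 * burn-in loop, but it is not a grid-stride loop). The conventional grid-stride
 * form over cudaMallocManaged memory looks like this; N and the launch shape
 * are example values only.
 */
#include <cuda_runtime.h>
#include <stdio.h>

__global__ void square(const float* x, float* y, int n) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;   // this thread's global index
    int stride = gridDim.x * blockDim.x;               // stride by the whole grid
    for (int i = idx; i < n; i += stride)
        y[i] = x[i] * x[i];
}

int main() {
    const int n = 1 << 20;
    float *x, *y;
    cudaMallocManaged(&x, n * sizeof(float));          // accessible from host and device
    cudaMallocManaged(&y, n * sizeof(float));
    for (int i = 0; i < n; i++) x[i] = (float)i;

    square<<<256, 256>>>(x, y, n);
    cudaDeviceSynchronize();                           // make results visible to the host

    printf("y[10] = %f\n", y[10]);                     // expected 100.0
    cudaFree(x);
    cudaFree(y);
    return 0;
}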
813db589cc89f68059df41f745c2e4289ea4615c.hip
// !!! This is a file automatically generated by hipify!!! #include "gtest/gtest.h" #include "hipfft.h" #include "thirdparty/fftw3.h" //Peasoup fix for fftw3 header #include <thrust/complex.h> #include <thrust/random.h> #include <thrust/random/normal_distribution.h> #include "misc/system.cuh" #include "data_types/frequencyseries.cuh" #include "data_types/timeseries.cuh" #include "transforms/fft.cuh" using namespace peasoup; template <System system> void test_case(size_t size) { int ii; typedef thrust::complex<float> complex; thrust::minstd_rand rng; thrust::random::normal_distribution<float> dist(0.0f, 1.0f); type::TimeSeries<HOST,float> hin; hin.data.resize(size); hin.metadata.tsamp = 0.000064; hin.metadata.dm = 0; hin.metadata.acc = 0; for (ii=0;ii<size;ii++) hin.data[ii] = dist(rng); type::TimeSeries<system,float> din = hin; type::FrequencySeries<system,complex> dout; transform::RealToComplexFFT<system> r2cfft(din,dout); r2cfft.prepare(); float out_binwidth = 1.0/(din.data.size()*din.metadata.tsamp); ASSERT_TRUE(fabs((dout.metadata.binwidth-out_binwidth)/out_binwidth)<0.0001); ASSERT_EQ(din.metadata.dm,dout.metadata.dm); ASSERT_EQ(din.metadata.acc,dout.metadata.acc); ASSERT_EQ(dout.data.size(),din.data.size()/2+1); r2cfft.execute(); transform::ComplexToRealFFT<system> c2rfft(dout,din); c2rfft.prepare(); size_t new_size = 2*(dout.data.size() - 1); float out_tsamp = 1.0/(dout.metadata.binwidth * new_size); ASSERT_TRUE(fabs((din.metadata.tsamp-out_tsamp)/out_tsamp)<0.0001); ASSERT_EQ(din.metadata.dm,dout.metadata.dm); ASSERT_EQ(din.metadata.acc,dout.metadata.acc); ASSERT_EQ(din.data.size(),new_size); c2rfft.execute(); type::TimeSeries<HOST,float> hout = din; for (ii=0;ii<size;ii++) { float in = hin.data[ii]; float out = hout.data[ii]/size; ASSERT_NEAR(in,out,0.0001); } } TEST(FFTTest,TestR2CHost) { test_case<HOST>(1<<23); } TEST(FFTTest,TestR2CDevice) { test_case<DEVICE>(1<<23); }
813db589cc89f68059df41f745c2e4289ea4615c.cu
#include "gtest/gtest.h" #include "cufft.h" #include "thirdparty/fftw3.h" //Peasoup fix for fftw3 header #include <thrust/complex.h> #include <thrust/random.h> #include <thrust/random/normal_distribution.h> #include "misc/system.cuh" #include "data_types/frequencyseries.cuh" #include "data_types/timeseries.cuh" #include "transforms/fft.cuh" using namespace peasoup; template <System system> void test_case(size_t size) { int ii; typedef thrust::complex<float> complex; thrust::minstd_rand rng; thrust::random::normal_distribution<float> dist(0.0f, 1.0f); type::TimeSeries<HOST,float> hin; hin.data.resize(size); hin.metadata.tsamp = 0.000064; hin.metadata.dm = 0; hin.metadata.acc = 0; for (ii=0;ii<size;ii++) hin.data[ii] = dist(rng); type::TimeSeries<system,float> din = hin; type::FrequencySeries<system,complex> dout; transform::RealToComplexFFT<system> r2cfft(din,dout); r2cfft.prepare(); float out_binwidth = 1.0/(din.data.size()*din.metadata.tsamp); ASSERT_TRUE(fabs((dout.metadata.binwidth-out_binwidth)/out_binwidth)<0.0001); ASSERT_EQ(din.metadata.dm,dout.metadata.dm); ASSERT_EQ(din.metadata.acc,dout.metadata.acc); ASSERT_EQ(dout.data.size(),din.data.size()/2+1); r2cfft.execute(); transform::ComplexToRealFFT<system> c2rfft(dout,din); c2rfft.prepare(); size_t new_size = 2*(dout.data.size() - 1); float out_tsamp = 1.0/(dout.metadata.binwidth * new_size); ASSERT_TRUE(fabs((din.metadata.tsamp-out_tsamp)/out_tsamp)<0.0001); ASSERT_EQ(din.metadata.dm,dout.metadata.dm); ASSERT_EQ(din.metadata.acc,dout.metadata.acc); ASSERT_EQ(din.data.size(),new_size); c2rfft.execute(); type::TimeSeries<HOST,float> hout = din; for (ii=0;ii<size;ii++) { float in = hin.data[ii]; float out = hout.data[ii]/size; ASSERT_NEAR(in,out,0.0001); } } TEST(FFTTest,TestR2CHost) { test_case<HOST>(1<<23); } TEST(FFTTest,TestR2CDevice) { test_case<DEVICE>(1<<23); }
c15bd6e880450fa50537fd536a9d6576f0c8342f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <cutil_math.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/sort.h> #include <thrust/sequence.h> #include <thrust/count.h> #define PI 3.14159f #define MAXP 40960 #define MAXPI 4096 #define MAXN 96 #define MAXG 409600 #define THREADS 256 struct pair { int key; int value; }; struct grid { float oX, oY, oZ; float size; int nX, nY, nZ; }; struct simulation { float minX, maxX; float minY, maxY; float minZ, maxZ; float dt; int tsn; int ssi; int nsi; }; struct load { float minX, maxX; float minY, maxY; float minZ, maxZ; float gx; float gy; float gz; float w; }; struct fix { float minX, maxX; float minY, maxY; float minZ, maxZ; float velX, velY, velZ; }; struct outlet { float oX, oY, oZ; float nX, nY, nZ; float R; }; struct inlet { float oX, oY, oZ; float nX, nY, nZ; float R; int Material; float Mass, Smooth; float Velocity; float Density, Energy; float Distance; }; struct model { int pn; int* Material; float* Mass; float* Smooth; float* PosX; float* PosY; float* PosZ; float* VelX; float* VelY; float* VelZ; float* Density; float* Energy; float* Pressure; float* Sound; float* VelDotX; float* VelDotY; float* VelDotZ; float* DensityDot; float* EnergyDot; float* PosX0; float* PosY0; float* PosZ0; float* VelX0; float* VelY0; float* VelZ0; float* Density0; float* Energy0; int* List; int* Hash; int* Index; int* SetStart; int* SetStop; int* IntDummy; float* FloatDummy; }; // Host Variables int hMatType[10]; float hMatProp[10][10]; struct simulation hRun; struct grid hGrid; struct load hLoad[10]; struct fix hFix[10]; struct outlet hOut[10]; struct inlet hIn[10]; // Device Variables __device__ __constant__ int dMatType[10]; __device__ __constant__ float dMatProp[10][10]; __device__ __constant__ struct simulation dRun; __device__ struct grid dGrid; __device__ __constant__ struct load dLoad[10]; __device__ __constant__ struct fix dFix[10]; __device__ __constant__ struct outlet dOut[10]; __device__ __constant__ struct inlet dIn[10]; __host__ __device__ float kernelWendland(float r, float h) { float q, alpha, w; /** * \brief Wendland kernel * * \date Feb 8, 2011 * \author Luca Massidda */ q = r / h; // for 3D alpha = 15.0f / (16.0f * PI * h * h * h); // for 2D //alpha = 7.0f / (4.0f * PI * h * h); w = 0.0f; if (q < 2) { w = powf((1.0f - 0.5f*q),4); w *= 1.0f + 2.0f*q; w *= alpha; } return w; } __host__ __device__ float kernelDerivWendland(float r, float h) { float q, alpha, dwdr; /** * \brief Wendland kernel derivative * * \date Feb 8, 2011 * \author Luca Massidda */ q = r / h; // for 3D alpha = 15.0f / (16.0f * PI * h * h * h); // for 2D //alpha = 7.0f / (4.0f * PI * h * h); dwdr = 0.0f; if (q < 2) { dwdr = 5.0f / 8.0f * q * powf((q - 2.0f), 3) ; dwdr *= alpha / h; } return dwdr; } float pressureGasHost(int mat ,float rho, float u) { /** * \brief Ideal gas Equation Of State * * p = (k -1) rho u * c = (k(k -1) u)^0.5 * * k = dMatProp[mat][1] * pshift = dMatProp[mat][2] * * \date Jun 10, 2010 * \author Luca Massidda */ float p; // float c; p = (hMatProp[mat][1] - 1.0) * rho * u; p += hMatProp[mat][2]; // c = sqrtf(hMatProp[mat][1] * (hMatProp[mat][1] - 1.0) * u); return p; } float pressurePolyHost(int mat , float rho, float u) { /** * \brief Mie-Gruneisen polynomial Equation Of State * * p = a1 mu + a2 mu^2 + a3 mu^3 + (b0 + b1 mu) rho0 u in compression * p = t1 mu + t2 mu^2 + b0 rho0 u in tension * * 
rho0 = dMatProp[mat][0]; * a1 = dMatProp[mat][1]; * a2 = dMatProp[mat][2]; * a3 = dMatProp[mat][3]; * b0 = dMatProp[mat][4]; * b1 = dMatProp[mat][5]; * t1 = dMatProp[mat][6]; * t2 = dMatProp[mat][7]; * pmin = dMatProp[mat][8]; * * \date Jun 10, 2010 * \author Luca Massidda */ float mu; float p; // float c; mu = (rho - hMatProp[mat][0]) / hMatProp[mat][0]; if (mu < 0) p = (hMatProp[mat][6] * mu + hMatProp[mat][7] * mu*mu) + (hMatProp[mat][4] * hMatProp[mat][0] * u); else p = (hMatProp[mat][1] * mu + hMatProp[mat][2] * mu*mu + hMatProp[mat][3] * mu*mu*mu) + ((hMatProp[mat][4] + hMatProp[mat][5] * mu) * hMatProp[mat][0] * u); if (p < hMatProp[mat][8]) p = hMatProp[mat][8]; // c = sqrtf(hMatProp[mat][1] / rho); return p; } float pressureShockHost(int mat, float rho, float u) { /** * \brief Mie-Gruneisen Shock Hugoniot Equation Of State * * mu = rho / rho0 -1 * g = g * rho0 / rho * ph = (rho0 c0^2 mu (1 + mu)) / (1 - (s0 - 1) * mu)^2 * uh = 1/2 ph/rho0 * (mu / (1 + mu)) * p = ph + g * rho * (u - uh) * * rho0 = dMatProp[mat][0]; * c0 = dMatProp[mat][1]; * g0 = dMatProp[mat][2]; * s0 = dMatProp[mat][3]; * pmin = dMatProp[mat][4]; * * \date Jun 10, 2010 * \author Luca Massidda */ float mu; float p, ph; // float c; mu = (rho - hMatProp[mat][0]) / hMatProp[mat][0]; ph = (hMatProp[mat][0] * powf(hMatProp[mat][1], 2) * mu*(1.0 +mu)) / powf((1.0 - (hMatProp[mat][3] -1.0) * mu), 2); p = ph + hMatProp[mat][2] * hMatProp[mat][0] * (u - (0.5 * ph / hMatProp[mat][0] * (mu / (1.0 + mu)))); if (p < hMatProp[mat][4]) p = hMatProp[mat][4]; // c = hMatProp[mat][1]; return p; } float pressureTaitHost(int mat, float rho, float u) { /** * \brief Tait Equation Of State * * p = rho0 * c0 * c0 / 7.0 * (powf((rho / rho0), 7) - 1.0); * c = c0; * * rho0 = dMatProp[mat][0]; * c0 = dMatProp[mat][1]; * pmin = dMatProp[mat][2]; * * \date Jun 10, 2010 * \author Luca Massidda */ float p; // float c; p = hMatProp[mat][0] * powf(hMatProp[mat][1], 2) / 7.0 * (powf((rho / hMatProp[mat][0]), 7) - 1.0); if (p < hMatProp[mat][2]) p = hMatProp[mat][2]; // c = hMatProp[mat][1]; return p; } // Global code void balanceMassMomentumHost(const int pn, const int* List, const int* Material, const float* Mass, const float* Smooth, const float* PosX, const float* PosY, const float* PosZ, const float* VelX, const float* VelY, const float* VelZ, const float* Density, const float* Pressure, const float* Sound, float* DensityDot, float* VelDotX, float* VelDotY, float* VelDotZ) { /** * \brief Interate particles * * \date Jan 6, 2011 * \author Luca Massidda */ int ip, il, jp; float iDensityDot; float iVelDotX, iVelDotY, iVelDotZ; float iSmooth, jMass; float dx, dy, dz, dr, dvr, dwdr, f, w, w0; for (ip = 0; ip < pn; ip++) { iDensityDot = 0.0f; iVelDotX = 0.0f; iVelDotY = 0.0f; iVelDotZ = 0.0f; iSmooth = Smooth[ip]; for (il = 0; il < MAXN; il++) { jp = List[ip * MAXN + il]; jMass = Mass[jp]; dx = PosX[ip] - PosX[jp]; dy = PosY[ip] - PosY[jp]; dz = PosZ[ip] - PosZ[jp]; dr = sqrtf(dx * dx + dy * dy + dz * dz); if (dr < (0.01f * iSmooth)) dr = 100.0f * iSmooth; w = kernelWendland(dr, iSmooth); w0 = kernelWendland(0.0f, iSmooth); dwdr = kernelDerivWendland(dr, iSmooth); dvr = 0.0f; dvr += (PosX[ip] - PosX[jp]) * (VelX[ip] - VelX[jp]); dvr += (PosY[ip] - PosY[jp]) * (VelY[ip] - VelY[jp]); dvr += (PosZ[ip] - PosZ[jp]) * (VelZ[ip] - VelZ[jp]); iDensityDot += jMass * dvr * dwdr / dr; // Calculate interparticle pressure action //f = -(Pressure[ip] + Pressure[jp]) // / (Density[ip] * Density[jp]); f = -(Pressure[ip] / powf(Density[ip], 2) + Pressure[jp] / 
powf(Density[jp], 2)); iVelDotX += jMass * f * dwdr * (PosX[ip] - PosX[jp]) / dr; iVelDotY += jMass * f * dwdr * (PosY[ip] - PosY[jp]) / dr; iVelDotZ += jMass * f * dwdr * (PosZ[ip] - PosZ[jp]) / dr; // Calculate shock correction for mass f = Density[ip] - Density[jp]; f *= 2.0f * Sound[ip] / (Density[ip] + Density[jp]); iDensityDot += jMass * f * dwdr; // Calculate shock correction for momentum if (dvr < 0.0f) f = dvr; else f = 0.0f; f *= iSmooth / (dr * dr + 0.01f * iSmooth * iSmooth); f *= 2.0f * Sound[ip] / (Density[ip] + Density[jp]); f *= 0.03f; iVelDotX += jMass * f * dwdr * (PosX[ip] - PosX[jp]) / dr; iVelDotY += jMass * f * dwdr * (PosY[ip] - PosY[jp]) / dr; iVelDotZ += jMass * f * dwdr * (PosZ[ip] - PosZ[jp]) / dr; // Calculate boundary repulsion if (Material[ip] != Material[jp]) { f = 0.5f * w / w0 * Sound[ip] * Sound[jp] / iSmooth; iVelDotX += jMass / (Mass[ip] + jMass) * f * (PosX[ip] - PosX[jp]) / dr; iVelDotY += jMass / (Mass[ip] + jMass) * f * (PosY[ip] - PosY[jp]) / dr; iVelDotZ += jMass / (Mass[ip] + jMass) * f * (PosZ[ip] - PosZ[jp]) / dr; } } DensityDot[ip] += iDensityDot; VelDotX[ip] += iVelDotX; VelDotY[ip] += iVelDotY; VelDotZ[ip] += iVelDotZ; } } __global__ void balanceMassMomentumDevice(const int pn, const int* List, const int* Material, const float* Mass, const float* Smooth, const float* PosX, const float* PosY, const float* PosZ, const float* VelX, const float* VelY, const float* VelZ, const float* Density, const float* Pressure, const float* Sound, float* DensityDot, float* VelDotX, float* VelDotY, float* VelDotZ) { /** * \brief Interate particles * * \date Jan 6, 2011 * \author Luca Massidda */ int ip, il, jp; float iDensityDot; float iVelDotX, iVelDotY, iVelDotZ; float iSmooth, jMass; volatile float dx, dy, dz, dr, dvr, dwdr, f, w, w0, q; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip < pn) { iDensityDot = 0.0f; iVelDotX = 0.0f; iVelDotY = 0.0f; iVelDotZ = 0.0f; iSmooth = Smooth[ip]; for (il = 0; il < MAXN; il++) { jp = List[ip * MAXN + il]; jMass = Mass[jp]; dx = PosX[ip] - PosX[jp]; dy = PosY[ip] - PosY[jp]; dz = PosZ[ip] - PosZ[jp]; dr = sqrtf(dx * dx + dy * dy + dz * dz); if (dr < (0.01f * iSmooth)) dr = 100.0f * iSmooth; w = kernelWendland(dr, iSmooth); dwdr = kernelDerivWendland(dr, iSmooth); if (Material[ip] == Material[jp]) { dvr = 0.0f; dvr += (PosX[ip] - PosX[jp]) * (VelX[ip] - VelX[jp]); dvr += (PosY[ip] - PosY[jp]) * (VelY[ip] - VelY[jp]); dvr += (PosZ[ip] - PosZ[jp]) * (VelZ[ip] - VelZ[jp]); iDensityDot += jMass * dvr * dwdr / dr; // Calculate interparticle pressure action f = -(Pressure[ip] / powf(Density[ip], 2) + Pressure[jp] / powf(Density[jp], 2)); f *= jMass * dwdr; iVelDotX += f * (PosX[ip] - PosX[jp]) / dr; iVelDotY += f * (PosY[ip] - PosY[jp]) / dr; iVelDotZ += f * (PosZ[ip] - PosZ[jp]) / dr; // Calculate shock correction for mass f = Density[ip] - Density[jp]; f *= 2.0f * Sound[ip] / (Density[ip] + Density[jp]); iDensityDot += jMass * f * dwdr; // Calculate shock correction for momentum if (dvr < 0.0f) f = dvr; else f = 0.0f; f *= iSmooth / (dr * dr + 0.01f * iSmooth * iSmooth); f *= 2.0f * Sound[ip] / (Density[ip] + Density[jp]); f *= 0.03f; f *= jMass * dwdr; iVelDotX += f * (PosX[ip] - PosX[jp]) / dr; iVelDotY += f * (PosY[ip] - PosY[jp]) / dr; iVelDotZ += f * (PosZ[ip] - PosZ[jp]) / dr; } // Calculate boundary repulsion if (Material[ip] != Material[jp]) { f = 0.5f * w * Mass[jp] / Density[jp] / Smooth[jp] * powf(Sound[jp], 2); iVelDotX += f * (PosX[ip] - PosX[jp]) / dr; iVelDotY += f * (PosY[ip] - PosY[jp]) / dr; iVelDotZ += f * 
(PosZ[ip] - PosZ[jp]) / dr; } } DensityDot[ip] += iDensityDot; VelDotX[ip] += iVelDotX; VelDotY[ip] += iVelDotY; VelDotZ[ip] += iVelDotZ; } } void balanceEnergyHost(const int pn, const float* Pressure, const float* Density, const float* DensityDot, float* EnergyDot) { /** * \brief Interate particles * * \date Jan 9, 2011 * \author Luca Massidda */ int ip; float iPressure, iDensity, iDensityDot; float iEnergyDot; for (ip = 0; ip < pn; ip++) { iPressure = Pressure[ip]; iDensity = Density[ip]; iDensityDot = DensityDot[ip]; iEnergyDot = (iPressure * iDensityDot) / (iDensity * iDensity); EnergyDot[ip] += iEnergyDot; } } __global__ void balanceEnergyDevice(const int pn, const float* Pressure, const float* Density, const float* DensityDot, float* EnergyDot) { /** * \brief Interate particles * * \date Jan 9, 2011 * \author Luca Massidda */ volatile int ip; float iPressure, iDensity, iDensityDot; float iEnergyDot; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip < pn) { iPressure = Pressure[ip]; iDensity = Density[ip]; iDensityDot = DensityDot[ip]; iEnergyDot = (iPressure * iDensityDot) / (iDensity * iDensity); EnergyDot[ip] += iEnergyDot; } } __host__ __device__ float pressureGas(float* properties, float rho, float u) { /** * \brief Ideal gas Equation Of State * * p = (k -1) rho u * c = (k(k -1) u)^0.5 * * k = properties[1] * pshift = properties[2] * * \date Jun 10, 2010 * \author Luca Massidda */ float p; p = (properties[1] - 1.0f) * rho * u; p += properties[2]; return p; } __host__ __device__ float pressurePoly(float* properties, float rho, float u) { /** * \brief Mie-Gruneisen polynomial Equation Of State * * p = a1 mu + a2 mu^2 + a3 mu^3 + (b0 + b1 mu) rho0 u in compression * p = t1 mu + t2 mu^2 + b0 rho0 u in tension * * rho0 = properties[0]; * a1 = properties[1]; * a2 = properties[2]; * a3 = properties[3]; * b0 = properties[4]; * b1 = properties[5]; * t1 = properties[6]; * t2 = properties[7]; * pmin = properties[8]; * * \date Jun 10, 2010 * \author Luca Massidda */ float mu; float p; mu = (rho - properties[0]) / properties[0]; if (mu < 0) p = (properties[6] * mu + properties[7] * mu*mu) + (properties[4] * properties[0] * u); else p = (properties[1] * mu + properties[2] * mu*mu + properties[3] * mu*mu*mu) + ((properties[4] + properties[5] * mu) * properties[0] * u); //if (p < properties[8]) p = properties[8]; return p; } __host__ __device__ float pressureShock(float* properties, float rho, float u) { /** * \brief Mie-Gruneisen Shock Hugoniot Equation Of State * * mu = rho / rho0 -1 * g = g * rho0 / rho * ph = (rho0 c0^2 mu (1 + mu)) / (1 - (s0 - 1) * mu)^2 * uh = 1/2 ph/rho0 * (mu / (1 + mu)) * p = ph + g * rho * (u - uh) * * rho0 = properties[0]; * c0 = properties[1]; * g0 = properties[2]; * s0 = properties[3]; * pmin = properties[4]; * * \date Jun 10, 2010 * \author Luca Massidda */ float mu; float p, ph; mu = (rho - properties[0]) / properties[0]; ph = (properties[0] * powf(properties[1], 2) * mu*(1.0f +mu)) / powf((1.0f - (properties[3] -1.0f) * mu), 2); p = ph + properties[2] * properties[0] * (u - (0.5f * ph / properties[0] * (mu / (1.0f + mu)))); //if (p < properties[4]) p = properties[4]; return p; } __host__ __device__ float pressureTait(float* properties, float rho, float u) { /** * \brief Tait Equation Of State * * p = rho0 * c0 * c0 / 7.0 * (powf((rho / rho0), 7) - 1.0); * c = c0; * * rho0 = properties[0]; * c0 = properties[1]; * pmin = properties[2]; * * \date Jun 10, 2010 * \author Luca Massidda */ float p; p = properties[0] * powf(properties[1], 2) / 7.0f * (powf((rho / 
properties[0]), 7) - 1.0f); //if (p < properties[2]) p = properties[2]; return p; } __host__ __device__ float soundGas(float* properties ,float rho, float u) { /** * \brief Ideal gas Equation Of State * * p = (k -1) rho u * c = (k(k -1) u)^0.5 * * k = properties[1] * pshift = properties[2] * * \date Jun 10, 2010 * \author Luca Massidda */ float c; c = sqrtf(properties[1] * (properties[1] - 1.0f) * u); return c; } __host__ __device__ float soundPoly(float* properties , float rho, float u) { /** * \brief Mie-Gruneisen polynomial Equation Of State * * p = a1 mu + a2 mu^2 + a3 mu^3 + (b0 + b1 mu) rho0 u in compression * p = t1 mu + t2 mu^2 + b0 rho0 u in tension * * rho0 = properties[0]; * a1 = properties[1]; * a2 = properties[2]; * a3 = properties[3]; * b0 = properties[4]; * b1 = properties[5]; * t1 = properties[6]; * t2 = properties[7]; * pmin = properties[8]; * * \date Jun 10, 2010 * \author Luca Massidda */ float c; c = sqrtf(properties[1] / rho); return c; } __host__ __device__ float soundShock(float* properties, float rho, float u) { /** * \brief Mie-Gruneisen Shock Hugoniot Equation Of State * * mu = rho / rho0 -1 * g = g * rho0 / rho * ph = (rho0 c0^2 mu (1 + mu)) / (1 - (s0 - 1) * mu)^2 * uh = 1/2 ph/rho0 * (mu / (1 + mu)) * p = ph + g * rho * (u - uh) * * rho0 = properties[0]; * c0 = properties[1]; * g0 = properties[2]; * s0 = properties[3]; * pmin = properties[4]; * * \date Jun 10, 2010 * \author Luca Massidda */ float c; c = properties[1]; return c; } __host__ __device__ float soundTait(float* properties, float rho, float u) { /** * \brief Tait Equation Of State * * p = rho0 * c0 * c0 / 7.0 * (powf((rho / rho0), 7) - 1.0); * c = c0; * * rho0 = properties[0]; * c0 = properties[1]; * pmin = properties[2]; * * \date Jun 10, 2010 * \author Luca Massidda */ float c; c = properties[1]; return c; } __host__ __device__ float densityPoly(float* properties , float rho) { /** * \brief Mie-Gruneisen polynomial Equation Of State * * p = a1 mu + a2 mu^2 + a3 mu^3 + (b0 + b1 mu) rho0 u in compression * p = t1 mu + t2 mu^2 + b0 rho0 u in tension * * rho0 = properties[0]; * a1 = properties[1]; * a2 = properties[2]; * a3 = properties[3]; * b0 = properties[4]; * b1 = properties[5]; * t1 = properties[6]; * t2 = properties[7]; * pmin = properties[8]; * * \date Jun 10, 2010 * \author Luca Massidda */ float rho0; rho0 = properties[0]; if (rho < 0.9f * rho0) rho = 0.9f*rho0; return rho; } __host__ __device__ float densityShock(float* properties, float rho) { /** * \brief Mie-Gruneisen Shock Hugoniot Equation Of State * * mu = rho / rho0 -1 * g = g * rho0 / rho * ph = (rho0 c0^2 mu (1 + mu)) / (1 - (s0 - 1) * mu)^2 * uh = 1/2 ph/rho0 * (mu / (1 + mu)) * p = ph + g * rho * (u - uh) * * rho0 = properties[0]; * c0 = properties[1]; * g0 = properties[2]; * s0 = properties[3]; * pmin = properties[4]; * * \date Jun 10, 2010 * \author Luca Massidda */ float rho0; rho0 = properties[0]; if (rho < 0.9f * rho0) rho = 0.9f*rho0; return rho; } __host__ __device__ float densityTait(float* properties, float rho) { /** * \brief Tait Equation Of State * * p = rho0 * c0 * c0 / 7.0 * (powf((rho / rho0), 7) - 1.0); * c = c0; * * rho0 = properties[0]; * c0 = properties[1]; * pmin = properties[2]; * * \date Jun 10, 2010 * \author Luca Massidda */ float rho0; rho0 = properties[0]; if (rho < 0.9f * rho0) rho = 0.9f*rho0; return rho; } void updateParticlesHost(const int pn, const float alpha, const int* Material, const float* VelDotX, const float* VelDotY, const float* VelDotZ, const float* DensityDot, const float* EnergyDot, const 
float* PosX0, const float* PosY0, const float* PosZ0, const float* VelX0, const float* VelY0, const float* VelZ0, const float* Density0, const float* Energy0, float* PosX, float* PosY, float* PosZ, float* VelX, float* VelY, float* VelZ, float* Density, float* Energy, float* Pressure, float* Sound) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ int ip, i; int iMaterial; for (ip = 0; ip < pn; ip++) if (Material[ip] != 0) { PosX[ip] = PosX0[ip] + alpha * (PosX[ip] + hRun.dt * VelX[ip] - PosX0[ip]); PosY[ip] = PosY0[ip] + alpha * (PosY[ip] + hRun.dt * VelY[ip] - PosY0[ip]); PosZ[ip] = PosZ0[ip] + alpha * (PosZ[ip] + hRun.dt * VelZ[ip] - PosZ0[ip]); VelX[ip] = VelX0[ip] + alpha * (VelX[ip] + hRun.dt * VelDotX[ip] - VelX0[ip]); VelY[ip] = VelY0[ip] + alpha * (VelY[ip] + hRun.dt * VelDotY[ip] - VelY0[ip]); VelZ[ip] = VelZ0[ip] + alpha * (VelZ[ip] + hRun.dt * VelDotZ[ip] - VelZ0[ip]); //VelZ[ip] = 0.0f; Density[ip] = Density0[ip] + alpha * (Density[ip] + hRun.dt * DensityDot[ip] - Density0[ip]); Energy[ip] = Energy0[ip] + alpha * (Energy[ip] + hRun.dt * EnergyDot[ip] - Energy0[ip]); iMaterial = Material[ip]; if (iMaterial <= 0) { VelX[ip] = VelX0[ip]; VelY[ip] = VelY0[ip]; VelZ[ip] = VelZ0[ip]; } for (i = 0; i < 10; i++) if ((PosX[ip] > hFix[i].minX) && (PosX[ip] < hFix[i].maxX) && (PosY[ip] > hFix[i].minY) && (PosY[ip] < hFix[i].maxY) && (PosZ[ip] > hFix[i].minZ) && (PosZ[ip] < hFix[i].maxZ)) { VelX[ip] = hFix[i].velX; VelY[ip] = hFix[i].velY; VelZ[ip] = hFix[i].velZ; } iMaterial = abs(iMaterial); if (hMatType[iMaterial] == 0) { VelX[ip] = VelX0[ip]; VelY[ip] = VelY0[ip]; VelZ[ip] = VelZ0[ip]; Density[ip] = Density0[ip]; Energy[ip] = Energy0[ip]; } switch (hMatType[iMaterial]) { case (0) : // BOUNDARY Density[ip] = densityTait(hMatProp[iMaterial], Density[ip]); Pressure[ip] = 0.0f*pressureTait(hMatProp[iMaterial], Density[ip], Energy[ip]); Sound[ip] = soundTait(hMatProp[iMaterial], Density[ip], Energy[ip]); break; case (1) : // IDEAL GAS EOS Pressure[ip] = pressureGas(hMatProp[iMaterial], Density[ip], Energy[ip]); Sound[ip] = soundGas(hMatProp[iMaterial], Density[ip], Energy[ip]); break; case (2) : // MIE-GRUNEISEN POLYNOMIAL EOS Density[ip] = densityPoly(hMatProp[iMaterial], Density[ip]); Pressure[ip] = pressurePoly(hMatProp[iMaterial], Density[ip], Energy[ip]); Sound[ip] = soundPoly(hMatProp[iMaterial], Density[ip], Energy[ip]); break; case (3) : // MIE-GRUNEISEN SHOCK EOS Density[ip] = densityShock(hMatProp[iMaterial], Density[ip]); Pressure[ip] = pressureShock(hMatProp[iMaterial], Density[ip], Energy[ip]); Sound[ip] = soundShock(hMatProp[iMaterial], Density[ip], Energy[ip]); break; case (4) : // TAIT EOS Density[ip] = densityTait(hMatProp[iMaterial], Density[ip]); Pressure[ip] = pressureTait(hMatProp[iMaterial], Density[ip], Energy[ip]); Sound[ip] = soundTait(hMatProp[iMaterial], Density[ip], Energy[ip]); break; default : Pressure[ip] = 0.0f; } } } __global__ void updateParticlesDevice(const int pn, const float alpha, const int* Material, const float* VelDotX, const float* VelDotY, const float* VelDotZ, const float* DensityDot, const float* EnergyDot, const float* PosX0, const float* PosY0, const float* PosZ0, const float* VelX0, const float* VelY0, const float* VelZ0, const float* Density0, const float* Energy0, float* PosX, float* PosY, float* PosZ, float* VelX, float* VelY, float* VelZ, float* Density, float* Energy, float* Pressure, float* Sound) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ int ip, i; int iMaterial; ip 
= threadIdx.x + blockDim.x * blockIdx.x; if (ip < pn) { PosX[ip] = PosX0[ip] + alpha * (PosX[ip] + dRun.dt * VelX[ip] - PosX0[ip]); PosY[ip] = PosY0[ip] + alpha * (PosY[ip] + dRun.dt * VelY[ip] - PosY0[ip]); PosZ[ip] = PosZ0[ip] + alpha * (PosZ[ip] + dRun.dt * VelZ[ip] - PosZ0[ip]); VelX[ip] = VelX0[ip] + alpha * (VelX[ip] + dRun.dt * VelDotX[ip] - VelX0[ip]); VelY[ip] = VelY0[ip] + alpha * (VelY[ip] + dRun.dt * VelDotY[ip] - VelY0[ip]); VelZ[ip] = VelZ0[ip] + alpha * (VelZ[ip] + dRun.dt * VelDotZ[ip] - VelZ0[ip]); //VelZ[ip] = 0.0f; Density[ip] = Density0[ip] + alpha * (Density[ip] + dRun.dt * DensityDot[ip] - Density0[ip]); Energy[ip] = Energy0[ip] + alpha * (Energy[ip] + dRun.dt * EnergyDot[ip] - Energy0[ip]); iMaterial = Material[ip]; for (i = 0; i < 10; i++) if ((PosX[ip] > dFix[i].minX) && (PosX[ip] < dFix[i].maxX) && (PosY[ip] > dFix[i].minY) && (PosY[ip] < dFix[i].maxY) && (PosZ[ip] > dFix[i].minZ) && (PosZ[ip] < dFix[i].maxZ)) { VelX[ip] = dFix[i].velX; VelY[ip] = dFix[i].velY; VelZ[ip] = dFix[i].velZ; } if (dMatType[iMaterial] == 0) { VelX[ip] = VelX0[ip]; VelY[ip] = VelY0[ip]; VelZ[ip] = VelZ0[ip]; } switch (dMatType[iMaterial]) { case (0) : // BOUNDARY Density[ip] = densityTait(dMatProp[iMaterial], Density[ip]); Pressure[ip] = 0.0f*pressureTait(dMatProp[iMaterial], Density[ip], Energy[ip]); Sound[ip] = soundTait(dMatProp[iMaterial], Density[ip], Energy[ip]); break; case (1) : // IDEAL GAS EOS Pressure[ip] = pressureGas(dMatProp[iMaterial], Density[ip], Energy[ip]); Sound[ip] = soundGas(dMatProp[iMaterial], Density[ip], Energy[ip]); break; case (2) : // MIE-GRUNEISEN POLYNOMIAL EOS Density[ip] = densityPoly(dMatProp[iMaterial], Density[ip]); Pressure[ip] = pressurePoly(dMatProp[iMaterial], Density[ip], Energy[ip]); Sound[ip] = soundPoly(dMatProp[iMaterial], Density[ip], Energy[ip]); break; case (3) : // MIE-GRUNEISEN SHOCK EOS Density[ip] = densityShock(dMatProp[iMaterial], Density[ip]); Pressure[ip] = pressureShock(dMatProp[iMaterial], Density[ip], Energy[ip]); Sound[ip] = soundShock(dMatProp[iMaterial], Density[ip], Energy[ip]); break; case (4) : // TAIT EOS Density[ip] = densityTait(dMatProp[iMaterial], Density[ip]); Pressure[ip] = pressureTait(dMatProp[iMaterial], Density[ip], Energy[ip]); Sound[ip] = soundTait(dMatProp[iMaterial], Density[ip], Energy[ip]); break; default : Pressure[ip] = 0.0f; } } } void updateLoadsHost(const int pn, const int* Material, float* PosX, float* PosY, float* PosZ, float* VelX, float* VelY, float* VelZ, float* VelDotX, float* VelDotY, float* VelDotZ, float* EnergyDot) { int ip, i; for (ip = 0; ip < pn; ip++) { if (Material[ip] > 0) { for (i = 0; i < 10; i++) { if ((PosX[ip] > hLoad[i].minX) && (PosX[ip] < hLoad[i].maxX) && (PosZ[ip] < hLoad[i].maxZ) && (PosY[ip] > hLoad[i].minY) && (PosY[ip] < hLoad[i].maxY) && (PosZ[ip] > hLoad[i].minZ) && (PosZ[ip] < hLoad[i].maxZ)) { VelDotX[ip] += hLoad[i].gx; VelDotY[ip] += hLoad[i].gy; VelDotZ[ip] += hLoad[i].gz; EnergyDot[ip] += hLoad[i].w; } } } } } __global__ void updateLoadsDevice(const int pn, const int* Material, const float* PosX, const float* PosY, const float* PosZ, float* VelDotX, float* VelDotY, float* VelDotZ, float* EnergyDot) { int ip, i; ip = threadIdx.x + blockDim.x * blockIdx.x; if ((ip < pn) && (Material[ip] > 0)) { for (i = 0; i < 10; i++) { if ((PosX[ip] > dLoad[i].minX) && (PosX[ip] < dLoad[i].maxX) && (PosZ[ip] < dLoad[i].maxZ) && (PosY[ip] > dLoad[i].minY) && (PosY[ip] < dLoad[i].maxY) && (PosZ[ip] > dLoad[i].minZ) && (PosZ[ip] < dLoad[i].maxZ)) { VelDotX[ip] += dLoad[i].gx; 
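                // A load box adds a constant body acceleration (gx, gy, gz)
                // and a heating rate w to every particle it contains; the
                // remaining components are accumulated just below.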
VelDotY[ip] += dLoad[i].gy; VelDotZ[ip] += dLoad[i].gz; EnergyDot[ip] += dLoad[i].w; } } } } // Host code int initHost(struct model *hm) { hm->Material = (int *) malloc(MAXP * sizeof(int)); hm->Mass = (float *) malloc(MAXP * sizeof(float)); hm->Smooth = (float *) malloc(MAXP * sizeof(float)); hm->PosX = (float *) malloc(MAXP * sizeof(float)); hm->PosY = (float *) malloc(MAXP * sizeof(float)); hm->PosZ = (float *) malloc(MAXP * sizeof(float)); hm->VelX = (float *) malloc(MAXP * sizeof(float)); hm->VelY = (float *) malloc(MAXP * sizeof(float)); hm->VelZ = (float *) malloc(MAXP * sizeof(float)); hm->Density = (float *) malloc(MAXP * sizeof(float)); hm->Energy = (float *) malloc(MAXP * sizeof(float)); hm->Pressure = (float *) malloc(MAXP * sizeof(float)); hm->Sound = (float *) malloc(MAXP * sizeof(float)); hm->VelDotX = (float *) malloc(MAXP * sizeof(float)); hm->VelDotY = (float *) malloc(MAXP * sizeof(float)); hm->VelDotZ = (float *) malloc(MAXP * sizeof(float)); hm->DensityDot = (float *) malloc(MAXP * sizeof(float)); hm->EnergyDot = (float *) malloc(MAXP * sizeof(float)); hm->PosX0 = (float *) malloc(MAXP * sizeof(float)); hm->PosY0 = (float *) malloc(MAXP * sizeof(float)); hm->PosZ0 = (float *) malloc(MAXP * sizeof(float)); hm->VelX0 = (float *) malloc(MAXP * sizeof(float)); hm->VelY0 = (float *) malloc(MAXP * sizeof(float)); hm->VelZ0 = (float *) malloc(MAXP * sizeof(float)); hm->Density0 = (float *) malloc(MAXP * sizeof(float)); hm->Energy0 = (float *) malloc(MAXP * sizeof(float)); hm->Hash = (int *) malloc(MAXP * sizeof(int)); hm->Index = (int *) malloc(MAXP * sizeof(int)); hm->List = (int *) malloc(MAXP * MAXN * sizeof(int)); hm->IntDummy = (int *) malloc(MAXP * sizeof(int)); hm->FloatDummy = (float *) malloc(MAXP * sizeof(float)); hm->SetStart = (int *) malloc(MAXG * sizeof(int)); hm->SetStop = (int *) malloc(MAXG * sizeof(int)); return 0; } int initDevice(struct model *dm) { size_t available, total; hipMalloc((void**) &(dm->Material), (MAXP * sizeof(int))); hipMalloc((void**) &(dm->Mass), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->Smooth), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->PosX), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->PosY), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->PosZ), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->VelX), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->VelY), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->VelZ), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->Density), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->Energy), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->Pressure), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->Sound), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->VelDotX), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->VelDotY), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->VelDotZ), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->DensityDot), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->EnergyDot), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->PosX0), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->PosY0), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->PosZ0), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->VelX0), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->VelY0), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->VelZ0), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->Density0), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->Energy0), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->Hash), (MAXP * sizeof(int))); hipMalloc((void**) 
&(dm->Index), (MAXP * sizeof(int))); hipMalloc((void**) &(dm->List), (MAXP * MAXN * sizeof(int))); hipMalloc((void**) &(dm->IntDummy), (MAXP * sizeof(int))); hipMalloc((void**) &(dm->FloatDummy), (MAXP * sizeof(float))); hipMalloc((void**) &(dm->SetStart), (MAXG * sizeof(int))); hipMalloc((void**) &(dm->SetStop), (MAXG * sizeof(int))); hipMemGetInfo(&available, &total); printf("Available memory %d of %d MB\n", available/1024/1024, total/1024/1024); return 0; } int copyHostToDevice(struct model *hm, struct model *dm) { dm->pn = hm->pn; hipMemcpy(dm->Material, hm->Material, (MAXP * sizeof(int)), hipMemcpyHostToDevice); hipMemcpy(dm->Mass, hm->Mass, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->Smooth, hm->Smooth, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->PosX, hm->PosX, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->PosY, hm->PosY, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->PosZ, hm->PosZ, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->VelX, hm->VelX, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->VelY, hm->VelY, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->VelZ, hm->VelZ, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->Density, hm->Density, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->Energy, hm->Energy, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->Pressure, hm->Pressure, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->Sound, hm->Sound, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->VelDotX, hm->VelDotX, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->VelDotY, hm->VelDotY, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->VelDotZ, hm->VelDotZ, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->DensityDot, hm->DensityDot, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->EnergyDot, hm->EnergyDot, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->PosX0, hm->PosX0, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->PosY0, hm->PosY0, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->PosZ0, hm->PosZ0, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->VelX0, hm->VelX0, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->VelY0, hm->VelY0, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->VelZ0, hm->VelZ0, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->Density0, hm->Density0, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->Energy0, hm->Energy0, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->List, hm->List, (MAXP * MAXN * sizeof(int)), hipMemcpyHostToDevice); hipMemcpy(dm->Hash, hm->Hash, (MAXP * sizeof(int)), hipMemcpyHostToDevice); hipMemcpy(dm->Index, hm->Index, (MAXP * sizeof(int)), hipMemcpyHostToDevice); hipMemcpy(dm->IntDummy, hm->IntDummy, (MAXP * sizeof(int)), hipMemcpyHostToDevice); hipMemcpy(dm->FloatDummy, hm->FloatDummy, (MAXP * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->SetStart, hm->SetStart, (MAXG * sizeof(int)), hipMemcpyHostToDevice); hipMemcpy(dm->SetStop, hm->SetStop, (MAXG * sizeof(int)), hipMemcpyHostToDevice); dGrid.oX = hGrid.oX; dGrid.oY = hGrid.oY; dGrid.oZ = hGrid.oZ; dGrid.nX = hGrid.nX; dGrid.nY = hGrid.nY; dGrid.nZ = hGrid.nZ; dGrid.size = hGrid.size; hipMemcpyToSymbol("dMatType", hMatType, 10 * sizeof(int)); hipMemcpyToSymbol("dMatProp", hMatProp, 100 * sizeof(float)); //hipMemcpyToSymbol("dGrid", &hGrid, 
sizeof(struct grid)); hipMemcpyToSymbol("dRun", &hRun, sizeof(struct simulation)); hipMemcpyToSymbol("dLoad", &hLoad, 10 * sizeof(struct load)); hipMemcpyToSymbol("dFix", &hFix, 10 * sizeof(struct fix)); hipMemcpyToSymbol("dIn", &hIn, 10 * sizeof(struct inlet)); hipMemcpyToSymbol("dOut", &hOut, 10 * sizeof(struct outlet)); return 0; } int copyDeviceToHost(struct model *dm, struct model *hm) { hm->pn = dm->pn; hipMemcpy(hm->Material, dm->Material, (MAXP * sizeof(int)), hipMemcpyDeviceToHost); hipMemcpy(hm->Mass, dm->Mass, (MAXP * sizeof(float)), hipMemcpyDeviceToHost); hipMemcpy(hm->Smooth, dm->Smooth, (MAXP * sizeof(float)), hipMemcpyDeviceToHost); hipMemcpy(hm->PosX, dm->PosX, (MAXP * sizeof(float)), hipMemcpyDeviceToHost); hipMemcpy(hm->PosY, dm->PosY, (MAXP * sizeof(float)), hipMemcpyDeviceToHost); hipMemcpy(hm->PosZ, dm->PosZ, (MAXP * sizeof(float)), hipMemcpyDeviceToHost); hipMemcpy(hm->VelX, dm->VelX, (MAXP * sizeof(float)), hipMemcpyDeviceToHost); hipMemcpy(hm->VelY, dm->VelY, (MAXP * sizeof(float)), hipMemcpyDeviceToHost); hipMemcpy(hm->VelZ, dm->VelZ, (MAXP * sizeof(float)), hipMemcpyDeviceToHost); hipMemcpy(hm->Density, dm->Density, (MAXP * sizeof(float)), hipMemcpyDeviceToHost); hipMemcpy(hm->Energy, dm->Energy, (MAXP * sizeof(float)), hipMemcpyDeviceToHost); hipMemcpy(hm->Pressure, dm->Pressure, (MAXP * sizeof(float)), hipMemcpyDeviceToHost); hipMemcpy(hm->Sound, dm->Sound, (MAXP * sizeof(float)), hipMemcpyDeviceToHost); hipMemcpy(hm->VelDotX, dm->VelDotX, (MAXP * sizeof(float)), hipMemcpyDeviceToHost); hipMemcpy(hm->VelDotY, dm->VelDotY, (MAXP * sizeof(float)), hipMemcpyDeviceToHost); hipMemcpy(hm->VelDotZ, dm->VelDotZ, (MAXP * sizeof(float)), hipMemcpyDeviceToHost); hipMemcpy(hm->DensityDot, dm->DensityDot, (MAXP * sizeof(float)), hipMemcpyDeviceToHost); hipMemcpy(hm->EnergyDot, dm->EnergyDot, (MAXP * sizeof(float)), hipMemcpyDeviceToHost); hipMemcpy(hm->PosX0, dm->PosX0, (MAXP * sizeof(float)), hipMemcpyDeviceToHost); hipMemcpy(hm->PosY0, dm->PosY0, (MAXP * sizeof(float)), hipMemcpyDeviceToHost); hipMemcpy(hm->PosZ0, dm->PosZ0, (MAXP * sizeof(float)), hipMemcpyDeviceToHost); hipMemcpy(hm->VelX0, dm->VelX0, (MAXP * sizeof(float)), hipMemcpyDeviceToHost); hipMemcpy(hm->VelY0, dm->VelY0, (MAXP * sizeof(float)), hipMemcpyDeviceToHost); hipMemcpy(hm->VelZ0, dm->VelZ0, (MAXP * sizeof(float)), hipMemcpyDeviceToHost); hipMemcpy(hm->Density0, dm->Density0, (MAXP * sizeof(float)), hipMemcpyDeviceToHost); hipMemcpy(hm->Energy0, dm->Energy0, (MAXP * sizeof(float)), hipMemcpyDeviceToHost); hipMemcpy(hm->List, dm->List, (MAXP * MAXN * sizeof(int)), hipMemcpyDeviceToHost); hipMemcpy(hm->Hash, dm->Hash, (MAXP * sizeof(int)), hipMemcpyDeviceToHost); hipMemcpy(hm->Index, dm->Index, (MAXP * sizeof(int)), hipMemcpyDeviceToHost); hipMemcpy(hm->IntDummy, dm->IntDummy, (MAXP * sizeof(int)), hipMemcpyDeviceToHost); hipMemcpy(hm->FloatDummy, dm->FloatDummy, (MAXP * sizeof(float)), hipMemcpyDeviceToHost); hipMemcpy(hm->SetStart, dm->SetStart, (MAXG * sizeof(int)), hipMemcpyDeviceToHost); hipMemcpy(hm->SetStop, dm->SetStop, (MAXG * sizeof(int)), hipMemcpyDeviceToHost); hGrid.oX = dGrid.oX; hGrid.oY = dGrid.oY; hGrid.oZ = dGrid.oZ; hGrid.nX = dGrid.nX; hGrid.nY = dGrid.nY; hGrid.nZ = dGrid.nZ; hGrid.size = dGrid.size; return 0; } int backupDataHost(struct model *hm) { memcpy(hm->PosX0, hm->PosX, MAXP * sizeof(float)); memcpy(hm->PosY0, hm->PosY, MAXP * sizeof(float)); memcpy(hm->PosZ0, hm->PosZ, MAXP * sizeof(float)); memcpy(hm->VelX0, hm->VelX, MAXP * sizeof(float)); memcpy(hm->VelY0, hm->VelY, MAXP * 
sizeof(float)); memcpy(hm->VelZ0, hm->VelZ, MAXP * sizeof(float)); memcpy(hm->Density0, hm->Density, MAXP * sizeof(float)); memcpy(hm->Energy0, hm->Energy, MAXP * sizeof(float)); return 0; } int backupDataDevice(struct model *dm) { hipMemcpy(dm->PosX0, dm->PosX, (MAXP * sizeof(float)), hipMemcpyDeviceToDevice); hipMemcpy(dm->PosY0, dm->PosY, (MAXP * sizeof(float)), hipMemcpyDeviceToDevice); hipMemcpy(dm->PosZ0, dm->PosZ, (MAXP * sizeof(float)), hipMemcpyDeviceToDevice); hipMemcpy(dm->VelX0, dm->VelX, (MAXP * sizeof(float)), hipMemcpyDeviceToDevice); hipMemcpy(dm->VelY0, dm->VelY, (MAXP * sizeof(float)), hipMemcpyDeviceToDevice); hipMemcpy(dm->VelZ0, dm->VelZ, (MAXP * sizeof(float)), hipMemcpyDeviceToDevice); hipMemcpy(dm->Density0, dm->Density, (MAXP * sizeof(float)), hipMemcpyDeviceToDevice); hipMemcpy(dm->Energy0, dm->Energy, (MAXP * sizeof(float)), hipMemcpyDeviceToDevice); return 0; } /* int initRun() { FILE *stream; char tok[10]; int i, m, p, pn; int iv; float fv; int mpn, mpp[10]; // Open stream file stream = fopen("armando.run", "r"); while (!feof(stream)) { sprintf(tok, " "); fscanf(stream, "%s", tok); if (strcmp(tok, "MAT") == 0) { fscanf(stream, "%i", &iv); if ((iv > 0) && (iv <= 50)) m = iv; for (p = 0; p < 10; p++) hMatProp[m][p] = 0.0; if ((m > 0) && (m <= 10)) pn = 3; if ((m > 10) && (m <= 20)) pn = 9; if ((m > 20) && (m <= 30)) pn = 10; if ((m > 30) && (m <= 40)) pn = 5; if ((m > 40) && (m <= 50)) pn = 3; for (p = 0; p < pn; p++) { fscanf(stream, "%f", &fv); hMatProp[m][p] = fv; } printf("Material %d\n", m); printf("hMatProp: \n"); for (p = 0; p < pn; p++) printf(" %f\n", hMatProp[m][p]); printf("\n"); } if (strcmp(tok, "TIME") == 0) { fscanf(stream, "%f", &fv); if (fv > 0.0) hRun.dt = fv; fscanf(stream, "%i", &iv); if (iv > 0) hRun.tsn = iv; fscanf(stream, "%i", &iv); if (iv > 0) hRun.ssi = iv; printf("Time step: %f\n", hRun.dt); printf("Steps: %i\n", hRun.tsn); printf("Save step: %i\n", hRun.ssi); printf("\n"); } if (strcmp(tok, "LIMITS") == 0) { fscanf(stream, "%f", &fv); hRun.minX = fv; fscanf(stream, "%f", &fv); hRun.maxX = fv; fscanf(stream, "%f", &fv); hRun.minY = fv; fscanf(stream, "%f", &fv); hRun.maxY = fv; printf("Domain limits: \n"); printf("X: %+e - %+e \n", hRun.minX, hRun.maxX); printf("Y: %+e - %+e \n", hRun.minY, hRun.maxY); printf("\n"); } if (strcmp(tok, "MONITORS") == 0) { fscanf(stream, "%i", &iv); mpn = iv; for (i = 0; i < mpn; i++) { fscanf(stream, "%i", &iv); mpp[i] = iv; } printf("Monitored particles: %i \n", mpn); if (mpn > 0) { printf("Index:"); for (i = 0; i < mpn; i++) printf(" %i", mpp[i]); printf("\n"); printf("\n"); } } } fclose(stream); hSound = hSmooth / hRun.dt; return 0; } int scanData() { FILE *stream; int i; float fv1, fv2, fv3; int iv; // Stream file position stream = fopen("in_pos.txt", "r"); for (i = 0; !feof(stream); i++) { fscanf(stream, "%e %e ", &fv1, &fv2); hPosX[i] = fv1; hPosY[i] = fv2; } fclose(stream); hPN = i; // Stream file velocity stream = fopen("in_vel.txt", "r"); for (i = 0; i < hPN; i++) { fscanf(stream, "%e %e", &fv1, &fv2); hVelX[i] = fv1; hVelY[i] = fv2; } fclose(stream); // Stream file info stream = fopen("in_info.txt", "r"); for (i = 0; i < hPN; i++) { fscanf(stream, "%i %e %e ", &iv, &fv1, &fv2); hMaterial[i] = iv; hMass = fv1; hSmooth = fv2; } fclose(stream); // Stream file field stream = fopen("in_field.txt", "r"); for (i = 0; i < hPN; i++) { fscanf(stream, "%e %e %e ", &fv1, &fv2, &fv3); hDensity[i] = fv1; hPressure[i] = fv2; hEnergy[i] = fv3; } fclose(stream); return 0; } */ int printData(struct model *hm) { 
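    // Output files are plain ASCII tables, one row per particle,
    // and are truncated and rewritten on every call (fopen "w").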
/** * \brief Particle data file output * * Saves particle data on a disk file * * \date Oct 21, 2010 * \author Luca Massidda */ FILE *stream; int i; // Stream file position stream = fopen("new_pos.txt", "w"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%+14.8e %+14.8e %+14.8e \n", hm->PosX[i], hm->PosY[i], hm->PosZ[i]); fclose(stream); // Stream file velocity stream = fopen("new_vel.txt", "w"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%+14.8e %+14.8e %+14.8e \n", hm->VelX[i], hm->VelY[i], hm->VelZ[i]); fclose(stream); // Stream file info stream = fopen("new_info.txt", "w"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%i %+14.8e %+14.8e \n", hm->Material[i], hm->Mass[i], hm->Smooth[i]); fclose(stream); // Stream file field stream = fopen("new_field.txt", "w"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%+14.8e %+14.8e %+14.8e \n", hm->Density[i], hm->Pressure[i], hm->Energy[i]); fclose(stream); /* // Stream file add1 stream = fopen("new_debug.txt", "w"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%d %d %d %f %f %f\n", i, hm->Index[i], hm->Hash[i], hm->PosX[i], hm->PosY[i], hm->PosZ[i]); fclose(stream); */ /* // Stream file add1 stream = fopen("new_debug.txt", "w"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%d %f %f %f %f %f %f\n", i, hm->VelX[i], hm->VelY[i], hm->VelZ[i], hm->Density[i], hm->Energy[i], hm->Pressure[i]); fclose(stream); */ // Stream file add1 stream = fopen("new_debug.txt", "w"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%d %d %d %d %d\n", i, hm->Index[i], hm->Hash[i], hm->SetStart[hm->Hash[i]], hm->SetStop[hm->Hash[i]]); fclose(stream); /* for (i = 0; i < hm->pn; i++) { printf("%d - ", i); for (int j = 0; j < MAXN; j++) printf("%d ", hm->List[i * MAXN +j]); printf("\n"); } */ return 0; } int outputVTK(struct model *hm, int ss) { /** * \brief Output Data file * * Saves vtk data file * * \date Oct 21, 2010 * \author Luca Massidda */ FILE *stream; char filename[80]; int i; // Stream position file sprintf(filename, "out%05d.vtk", ss); stream = fopen(filename, "w"); fprintf(stream, "# vtk DataFile Version 2.0\n"); fprintf(stream, "Unstructured Grid Example\n"); fprintf(stream, "ASCII\n"); fprintf(stream, "DATASET UNSTRUCTURED_GRID\n"); fprintf(stream, "POINTS %i float\n", hm->pn); for (i = 0; i < hm->pn; i++) fprintf(stream, "%+e %+e %+e \n", hm->PosX[i], hm->PosY[i], hm->PosZ[i]); fprintf(stream, "CELLS %i %i \n", hm->pn, 2*hm->pn); for (i = 0; i < hm->pn; i++) fprintf(stream, "%i %i \n", 1, i); fprintf(stream, "CELL_TYPES %i \n", hm->pn); for (i = 0; i < hm->pn; i++) fprintf(stream, "%i \n", 1); fprintf(stream, "POINT_DATA %i \n", hm->pn); fprintf(stream, "SCALARS material int 1 \n", hm->pn); fprintf(stream, "LOOKUP_TABLE default\n"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%+d \n", hm->Material[i]); fprintf(stream, "SCALARS density float 1 \n", hm->pn); fprintf(stream, "LOOKUP_TABLE default\n"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%+e \n", hm->Density[i]); fprintf(stream, "SCALARS pressure float 1 \n", hm->pn); fprintf(stream, "LOOKUP_TABLE default\n"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%+e \n", hm->Pressure[i]); fprintf(stream, "SCALARS energy float 1 \n", hm->pn); fprintf(stream, "LOOKUP_TABLE default\n"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%+e \n", hm->Energy[i]); fprintf(stream, "VECTORS velocity float\n"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%+e %+e %+e \n", hm->VelX[i], hm->VelY[i], hm->VelZ[i]); fclose(stream); /* for (i = 0; i < hm->pn; i++) printf("%d %d \n", i, hm->Hash[i]); printf("\n\n\n"); 
for (i = 0; i < hm->SetStart.size(); i++) printf("%d %d %d \n", i, hm->SetStart[i], hm->SetStop[i]); for (i = 0; i < hm->pn; i++) { printf("%d - ", i); for (j = 0; j < MAXN; j++) printf("%d ", hm->List[i*MAXN +j]); printf("\n"); } */ return 0; } void initFree(struct model *hm) { int i, j, k, m, b, pi; double rho, c0, pmin; double dr; m = 1; b = 2; rho = 1000.; c0 = 50.; pmin = -1.e12; hMatType[m] = 4; hMatProp[m][0] = rho; hMatProp[m][1] = c0; hMatProp[m][2] = pmin; hMatType[b] = 0; hMatProp[b][0] = rho; hMatProp[b][1] = c0; hMatProp[b][2] = pmin; dr = 0.1; // x4 pi = 0; for (k = 0; k < 10; k++) { for (j = 0; j < 10; j++) { for (i = 0; i < 10; i++) { hm->PosX[pi] = i * dr + 0.0 * dr; hm->PosY[pi] = j * dr + 0.0 * dr; hm->PosZ[pi] = k * dr + 0.0 * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = m; hm->Density[pi] = rho; //+ (9.81 * rho / c0 / c0 * (50 - j) * dr); hm->Energy[pi] = 0.0; hm->Pressure[pi] = 1.0; pi++; } } } for (k = 0; k < 10; k++) { for (j = -2; j < -1; j++) { for (i = 0; i < 10; i++) { hm->PosX[pi] = i * dr + 0.0 * dr; hm->PosY[pi] = j * dr + 0.0 * dr; hm->PosZ[pi] = k * dr + 0.0 * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = b; hm->Density[pi] = rho; //+ (9.81 * rho / c0 / c0 * (50 - j) * dr); hm->Energy[pi] = 0.0; hm->Pressure[pi] = 1.0; pi++; } } } hm->pn = pi; for (i = 0; i < hm->pn; i++) { hm->Mass[i] = rho * dr * dr * dr; hm->Smooth[i] = 1.2 * dr; hm->Sound[i] = c0; } hRun.minX = -2.5; hRun.maxX = 2.5; hRun.minY = -2.5; hRun.maxY = 2.5; hRun.minZ = -2.5; hRun.maxZ = 2.5; hRun.dt = 2.0e-3; //1.0e-3; hRun.tsn = 600; //1000; hRun.ssi = 200; hGrid.oX = hRun.minX; hGrid.oY = hRun.minY; hGrid.oZ = hRun.minZ; hGrid.size = 2.0f * 1.2f * dr; hGrid.nX = (int) ((hRun.maxX - hRun.minX) / hGrid.size) +1; hGrid.nY = (int) ((hRun.maxY - hRun.minY) / hGrid.size) +1; hGrid.nZ = (int) ((hRun.maxZ - hRun.minZ) / hGrid.size) +1; hLoad[0].minX = hRun.minX; hLoad[0].maxX = hRun.maxX; hLoad[0].minY = hRun.minY; hLoad[0].maxY = hRun.maxY; hLoad[0].minZ = hRun.minZ; hLoad[0].maxZ = hRun.maxZ; hLoad[0].gy = -9.81f; printf("Freefall\n"); printf("Particles: %i \n", hm->pn); } void initBox(struct model *hm) { int i, j, k, m, b, q, pi; double rho, c0, pmin; double dr; q = 9; m = 1; b = 2; rho = 1000.; c0 = 40.; pmin = -1.e12; hMatType[m] = 4; hMatProp[m][0] = rho; hMatProp[m][1] = c0; hMatProp[m][2] = pmin; hMatType[b] = 0; hMatProp[b][0] = rho; hMatProp[b][1] = c0; hMatProp[b][2] = pmin; dr = 0.1 / q; pi = 0; for (k = 0; k < 10 * q; k++) { for (j = 0; j < 10 * q; j++) { for (i = 0; i < 15 * q; i++) { hm->PosX[pi] = i * dr + 0.0 * dr; hm->PosY[pi] = j * dr + 0.0 * dr; hm->PosZ[pi] = k * dr + 0.0 * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = m; hm->Density[pi] = rho; //+ (9.81 * rho / c0 / c0 * (50 - j) * dr); hm->Energy[pi] = 0.0; hm->Pressure[pi] = 1.0; pi++; } } } for (k = -2; k < 20 * q +2; k++) { for (j = -2; j < -1; j++) { for (i = -2; i < 20 * q +2; i++) { hm->PosX[pi] = i * dr + 0.0 * dr; hm->PosY[pi] = j * dr + 0.0 * dr; hm->PosZ[pi] = k * dr + 0.0 * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = b; hm->Density[pi] = rho; //+ (9.81 * rho / c0 / c0 * (50 - j) * dr); hm->Energy[pi] = 0.0; hm->Pressure[pi] = 1.0; pi++; } } } for (k = -2; k < -1; k++) { for (j = -1; j < 15 * q; j++) { for (i = -2; i < 20 * q +2; i++) { hm->PosX[pi] = i * dr + 0.0 * dr; hm->PosY[pi] = j * dr + 0.0 * dr; hm->PosZ[pi] = k * dr + 0.0 * dr; hm->VelX[pi] = 0.0; 
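                // wall particles: material index b, initially at rest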
hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = b; hm->Density[pi] = rho; //+ (9.81 * rho / c0 / c0 * (50 - j) * dr); hm->Energy[pi] = 0.0; hm->Pressure[pi] = 1.0; pi++; } } } for (k = 20 * q +1; k < 20 * q +2 ; k++) { for (j = -1; j < 15 * q; j++) { for (i = -2; i < 20 * q +2; i++) { hm->PosX[pi] = i * dr + 0.0 * dr; hm->PosY[pi] = j * dr + 0.0 * dr; hm->PosZ[pi] = k * dr + 0.0 * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = b; hm->Density[pi] = rho; //+ (9.81 * rho / c0 / c0 * (50 - j) * dr); hm->Energy[pi] = 0.0; hm->Pressure[pi] = 1.0; pi++; } } } for (k = -1; k < 20 * q +1; k++) { for (j = -1; j < 15 * q; j++) { for (i = -2; i < -1; i++) { hm->PosX[pi] = i * dr + 0.0 * dr; hm->PosY[pi] = j * dr + 0.0 * dr; hm->PosZ[pi] = k * dr + 0.0 * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = b; hm->Density[pi] = rho; //+ (9.81 * rho / c0 / c0 * (50 - j) * dr); hm->Energy[pi] = 0.0; hm->Pressure[pi] = 1.0; pi++; } } } for (k = -1; k < 20 * q +1; k++) { for (j = -1; j < 15 * q; j++) { for (i = 20 * q +1; i < 20 * q +2; i++) { hm->PosX[pi] = i * dr + 0.0 * dr; hm->PosY[pi] = j * dr + 0.0 * dr; hm->PosZ[pi] = k * dr + 0.0 * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = b; hm->Density[pi] = rho; //+ (9.81 * rho / c0 / c0 * (50 - j) * dr); hm->Energy[pi] = 0.0; hm->Pressure[pi] = 1.0; pi++; } } } hm->pn = pi; for (i = 0; i < hm->pn; i++) { hm->Mass[i] = rho * dr * dr * dr; hm->Smooth[i] = 1.2 * dr; hm->Sound[i] = c0; } hRun.minX = -2.5; hRun.maxX = 2.5; hRun.minY = -2.5; hRun.maxY = 2.5; hRun.minZ = -2.5; hRun.maxZ = 2.5; hRun.dt = dr / c0; hRun.tsn = 800 * q; hRun.ssi = 20 * q; hGrid.oX = hRun.minX; hGrid.oY = hRun.minY; hGrid.oZ = hRun.minZ; hGrid.size = 2.0f * 1.2f * dr; hGrid.nX = (int) ((hRun.maxX - hRun.minX) / hGrid.size) +1; hGrid.nY = (int) ((hRun.maxY - hRun.minY) / hGrid.size) +1; hGrid.nZ = (int) ((hRun.maxZ - hRun.minZ) / hGrid.size) +1; hLoad[0].minX = hRun.minX; hLoad[0].maxX = hRun.maxX; hLoad[0].minY = hRun.minY; hLoad[0].maxY = hRun.maxY; hLoad[0].minZ = hRun.minZ; hLoad[0].maxZ = hRun.maxZ; hLoad[0].gy = -9.81f; printf("Box\n"); printf("Particles: %i \n", hm->pn); } void initBath(struct model *hm) { int i, j, k, m, b, q, pi; double rho, c0, pmin; double dr; q = 15; m = 1; b = 2; rho = 1000.; c0 = 40.; pmin = -1.e12; hMatType[m] = 4; hMatProp[m][0] = rho; hMatProp[m][1] = c0; hMatProp[m][2] = pmin; hMatType[b] = 0; hMatProp[b][0] = rho; hMatProp[b][1] = c0; hMatProp[b][2] = pmin; dr = 0.1 / q; pi = 0; for (k = 0; k < 10 * q; k++) { for (j = 0; j < 10 * q; j++) { for (i = 0; i < 10 * q; i++) { hm->PosX[pi] = i * dr + 0.0 * dr; hm->PosY[pi] = j * dr + 0.0 * dr; hm->PosZ[pi] = k * dr + 0.0 * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = m; hm->Density[pi] = rho; //+ (9.81 * rho / c0 / c0 * (50 - j) * dr); hm->Energy[pi] = 0.0; hm->Pressure[pi] = 1.0; pi++; } } } for (k = -2; k < 10 * q +2; k++) { for (j = -2; j < -1; j++) { for (i = -2; i < 10 * q +2; i++) { hm->PosX[pi] = i * dr + 0.0 * dr; hm->PosY[pi] = j * dr + 0.0 * dr; hm->PosZ[pi] = k * dr + 0.0 * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = b; hm->Density[pi] = rho; //+ (9.81 * rho / c0 / c0 * (50 - j) * dr); hm->Energy[pi] = 0.0; hm->Pressure[pi] = 1.0; pi++; } } } for (k = -2; k < -1; k++) { for (j = -1; j < 12 * q; j++) { for (i = -2; i < 10 * q +2; i++) { hm->PosX[pi] = i * dr + 0.0 * dr; hm->PosY[pi] = j * dr + 0.0 * dr; 
hm->PosZ[pi] = k * dr + 0.0 * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = b; hm->Density[pi] = rho; //+ (9.81 * rho / c0 / c0 * (50 - j) * dr); hm->Energy[pi] = 0.0; hm->Pressure[pi] = 1.0; pi++; } } } for (k = 10 * q +1; k < 10 * q +2 ; k++) { for (j = -1; j < 12 * q; j++) { for (i = -2; i < 10 * q +2; i++) { hm->PosX[pi] = i * dr + 0.0 * dr; hm->PosY[pi] = j * dr + 0.0 * dr; hm->PosZ[pi] = k * dr + 0.0 * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = b; hm->Density[pi] = rho; //+ (9.81 * rho / c0 / c0 * (50 - j) * dr); hm->Energy[pi] = 0.0; hm->Pressure[pi] = 1.0; pi++; } } } for (k = -1; k < 10 * q +1; k++) { for (j = -1; j < 12 * q; j++) { for (i = -2; i < -1; i++) { hm->PosX[pi] = i * dr + 0.0 * dr; hm->PosY[pi] = j * dr + 0.0 * dr; hm->PosZ[pi] = k * dr + 0.0 * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = b; hm->Density[pi] = rho; //+ (9.81 * rho / c0 / c0 * (50 - j) * dr); hm->Energy[pi] = 0.0; hm->Pressure[pi] = 1.0; pi++; } } } for (k = -1; k < 10 * q +1; k++) { for (j = -1; j < 12 * q; j++) { for (i = 10 * q +1; i < 10 * q +2; i++) { hm->PosX[pi] = i * dr + 0.0 * dr; hm->PosY[pi] = j * dr + 0.0 * dr; hm->PosZ[pi] = k * dr + 0.0 * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = b; hm->Density[pi] = rho; //+ (9.81 * rho / c0 / c0 * (50 - j) * dr); hm->Energy[pi] = 0.0; hm->Pressure[pi] = 1.0; pi++; } } } hm->pn = pi; for (i = 0; i < hm->pn; i++) { hm->Mass[i] = rho * dr * dr * dr; hm->Smooth[i] = 1.2 * dr; hm->Sound[i] = c0; } hRun.minX = -2.5; hRun.maxX = 2.5; hRun.minY = -2.5; hRun.maxY = 2.5; hRun.minZ = -2.5; hRun.maxZ = 2.5; hRun.dt = dr / c0; hRun.tsn = 1000 * q; hRun.ssi = 20 * q; hGrid.oX = hRun.minX; hGrid.oY = hRun.minY; hGrid.oZ = hRun.minZ; hGrid.size = 2.0f * 1.2f * dr; hGrid.nX = (int) ((hRun.maxX - hRun.minX) / hGrid.size) +1; hGrid.nY = (int) ((hRun.maxY - hRun.minY) / hGrid.size) +1; hGrid.nZ = (int) ((hRun.maxZ - hRun.minZ) / hGrid.size) +1; hLoad[0].minX = hRun.minX; hLoad[0].maxX = hRun.maxX; hLoad[0].minY = hRun.minY; hLoad[0].maxY = hRun.maxY; hLoad[0].minZ = hRun.minZ; hLoad[0].maxZ = hRun.maxZ; hLoad[0].gy = -9.81f; hOut[0].oX = 5 * q * dr; hOut[0].oY = dr; hOut[0].oZ = 5 * q * dr; hOut[0].nX = 0.0f; hOut[0].nY = 1.0f; hOut[0].nZ = 0.0f; hOut[0].R = 2.0f*q*dr; hIn[0].Material = m; hIn[0].Mass = rho * dr * dr * dr; hIn[0].Smooth = 1.2f * dr; hIn[0].Density = rho; hIn[0].Energy = 0.0f; hIn[0].oX = 0.f * q * dr; hIn[0].oY = 15.f * q * dr; hIn[0].oZ = 5.f * q * dr; hIn[0].nX = 1.0f; hIn[0].nY = 0.0f; hIn[0].nZ = 0.0f; hIn[0].Velocity = 1.5f; hIn[0].R = 2.0f *q*dr; printf("Bath\n"); printf("Particles: %i \n", hm->pn); printf("Grid %i\n", hGrid.nX * hGrid.nY * hGrid.nZ); } void initChannel(struct model *hm) { int i, j, k, m, b, q, pi; double rho, c0, pmin; double dr; q = 1; m = 1; b = 2; rho = 1000.; c0 = 20.; pmin = -1.e12; hMatType[m] = 4; hMatProp[m][0] = rho; hMatProp[m][1] = c0; hMatProp[m][2] = pmin; hMatType[b] = 0; hMatProp[b][0] = rho; hMatProp[b][1] = c0; hMatProp[b][2] = pmin; dr = 0.1 / q; pi = 0; /* for (k = 0; k < 10 * q; k++) { for (j = 0; j < 10 * q; j++) { for (i = 0; i < 10 * q; i++) { hm->PosX[pi] = i * dr + 0.0 * dr; hm->PosY[pi] = j * dr + 0.0 * dr; hm->PosZ[pi] = k * dr + 0.0 * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = m; hm->Density[pi] = rho; //+ (9.81 * rho / c0 / c0 * (50 - j) * dr); hm->Energy[pi] = 0.0; hm->Pressure[pi] = 1.0; pi++; } } } */ for (k = 
-10 * q -2; k <= 10 * q +2; k++) { for (j = -2; j <= -2; j++) { for (i = 0; i <= 100 * q; i++) { hm->PosX[pi] = i * dr + 0.0 * dr; hm->PosY[pi] = j * dr + 0.0 * dr; hm->PosZ[pi] = k * dr + 0.0 * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = b; hm->Density[pi] = rho; hm->Energy[pi] = 0.0; hm->Pressure[pi] = 0.0; pi++; } } } for (k = -10 * q -2; k <= -10 * q -2; k++) { for (j = -1; j <= 15 * q; j++) { for (i = 0; i < 100 * q; i++) { hm->PosX[pi] = i * dr + 0.0 * dr; hm->PosY[pi] = j * dr + 0.0 * dr; hm->PosZ[pi] = k * dr + 0.0 * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = b; hm->Density[pi] = rho; hm->Energy[pi] = 0.0; hm->Pressure[pi] = 0.0; pi++; } } } for (k = 10 * q +2; k <= 10 * q +2 ; k++) { for (j = -1; j <= 15 * q; j++) { for (i = 0; i <= 100 * q; i++) { hm->PosX[pi] = i * dr + 0.0 * dr; hm->PosY[pi] = j * dr + 0.0 * dr; hm->PosZ[pi] = k * dr + 0.0 * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = b; hm->Density[pi] = rho; hm->Energy[pi] = 0.0; hm->Pressure[pi] = 0.0; pi++; } } } hm->pn = pi; for (i = 0; i < hm->pn; i++) { hm->Mass[i] = rho * dr * dr * dr; hm->Smooth[i] = 1.2 * dr; hm->Sound[i] = c0; } hRun.minX = -0.5; hRun.maxX = 10.5; hRun.minY = -0.5; hRun.maxY = 3.0; hRun.minZ = -1.5; hRun.maxZ = 1.5; hRun.dt = dr / c0; hRun.tsn = 10000 * q; hRun.ssi = 200 * q; hGrid.oX = hRun.minX; hGrid.oY = hRun.minY; hGrid.oZ = hRun.minZ; hGrid.size = 2.0f * 1.2f * dr; hGrid.nX = (int) ((hRun.maxX - hRun.minX) / hGrid.size) +1; hGrid.nY = (int) ((hRun.maxY - hRun.minY) / hGrid.size) +1; hGrid.nZ = (int) ((hRun.maxZ - hRun.minZ) / hGrid.size) +1; hLoad[0].minX = hRun.minX; hLoad[0].maxX = hRun.maxX; hLoad[0].minY = hRun.minY; hLoad[0].maxY = hRun.maxY; hLoad[0].minZ = hRun.minZ; hLoad[0].maxZ = hRun.maxZ; hLoad[0].gy = -9.81f; hFix[0].minX = hRun.minX; hFix[0].maxX = 2.0f * q * dr; hFix[0].minY = hRun.minY; hFix[0].maxY = hRun.maxY; hFix[0].minZ = hRun.minZ; hFix[0].maxZ = hRun.maxZ; hFix[0].velX = 2.0f; hFix[0].velY = 0.0f; hFix[0].velZ = 0.0f; hFix[1].minX = 97 * q * dr; hFix[1].maxX = hRun.maxX; hFix[1].minY = hRun.minY; hFix[1].maxY = hRun.maxY; hFix[1].minZ = hRun.minZ; hFix[1].maxZ = hRun.maxZ; hFix[1].velX = 2.0f; hFix[1].velY = 0.0f; hFix[1].velZ = 0.0f; hOut[0].oX = 100 * q * dr; hOut[0].oY = 5 * q * dr; hOut[0].oZ = 0.0f; hOut[0].nX = -1.0f; hOut[0].nY = 0.0f; hOut[0].nZ = 0.0f; hOut[0].R = 20.0f*q*dr; hIn[0].Material = m; hIn[0].Mass = rho * dr * dr * dr; hIn[0].Smooth = 1.2f * dr; hIn[0].Density = rho; hIn[0].Energy = 0.0f; hIn[0].oX = 10.f * q * dr; hIn[0].oY = 20.f * q * dr; hIn[0].oZ = 0.f * q * dr; hIn[0].nX = 0.5f; hIn[0].nY = -0.5f; hIn[0].nZ = 0.0f; hIn[0].Velocity = 2.0f; hIn[0].R = 10.0f *q*dr; printf("Channel\n"); printf("Particles: %i \n", hm->pn); printf("Grid %i\n", hGrid.nX * hGrid.nY * hGrid.nZ); } void initDamBreak(struct model *hm) { int i, j, k, m, b, q, pi; double rho, c0, pmin; double dr; q = 4; m = 1; b = 2; rho = 1000.; c0 = 20.; pmin = -1.e12; hMatType[m] = 4; hMatProp[m][0] = rho; hMatProp[m][1] = c0; hMatProp[m][2] = pmin; hMatType[b] = 0; hMatProp[b][0] = rho; hMatProp[b][1] = c0; hMatProp[b][2] = pmin; dr = 0.025 / q; pi = 0; for (k = -20 * q; k <= 20 * q; k++) { for (j = 0; j <= 22 * q; j++) { for (i = 0; i <= 49 * q; i++) { hm->PosX[pi] = i * dr; hm->PosY[pi] = j * dr; hm->PosZ[pi] = k * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = m; hm->Density[pi] = rho; hm->Energy[pi] = 0.0; hm->Pressure[pi] = 0.0; 
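                // pi counts the particles placed so far; it becomes hm->pn
                // once all fluid and wall blocks have been generated.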
pi++; } } } for (k = -20 * q -2; k <= 20 * q +2; k++) { for (j = -2; j <= -2; j++) { for (i = -80 * q -2; i <= 49 * q +2; i++) { hm->PosX[pi] = i * dr; hm->PosY[pi] = j * dr; hm->PosZ[pi] = k * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = b; hm->Density[pi] = rho; hm->Energy[pi] = 0.0; hm->Pressure[pi] = 0.0; pi++; } } } for (k = -20 * q -2; k <= -20 * q -2; k++) { for (j = -1; j <= 40 * q; j++) { for (i = -80 * q -2; i <= 49 * q +2; i++) { hm->PosX[pi] = i * dr; hm->PosY[pi] = j * dr; hm->PosZ[pi] = k * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = b; hm->Density[pi] = rho; hm->Energy[pi] = 0.0; hm->Pressure[pi] = 0.0; pi++; } } } for (k = 20 * q +2; k <= 20 * q +2; k++) { for (j = -1; j <= 40 * q; j++) { for (i = -80 * q -2; i <= 49 * q +2; i++) { hm->PosX[pi] = i * dr; hm->PosY[pi] = j * dr; hm->PosZ[pi] = k * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = b; hm->Density[pi] = rho; hm->Energy[pi] = 0.0; hm->Pressure[pi] = 0.0; pi++; } } } for (k = -20 * q -1; k <= 20 * q +1; k++) { for (j = -1; j <= 40 * q; j++) { for (i = -80 * q -2; i <= -80 * q -2; i++) { hm->PosX[pi] = i * dr; hm->PosY[pi] = j * dr; hm->PosZ[pi] = k * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = b; hm->Density[pi] = rho; hm->Energy[pi] = 0.0; hm->Pressure[pi] = 0.0; pi++; } } } for (k = -20 * q -1; k <= 20 * q +1; k++) { for (j = -1; j <= 40 * q; j++) { for (i = 49 * q +2; i <= 49 * q +2; i++) { hm->PosX[pi] = i * dr; hm->PosY[pi] = j * dr; hm->PosZ[pi] = k * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = b; hm->Density[pi] = rho; hm->Energy[pi] = 0.0; hm->Pressure[pi] = 0.0; pi++; } } } for (k = -8 * q -1; k <= 8 * q +1; k++) { for (j = -0; j <= 6 * q -1; j++) { for (i = -53 * q +1; i <= -47 * q -1; i++) { hm->PosX[pi] = i * dr; hm->PosY[pi] = j * dr; hm->PosZ[pi] = k * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = b; hm->Density[pi] = rho; hm->Energy[pi] = 0.0; hm->Pressure[pi] = 0.0; pi++; } } } hm->pn = pi; for (i = 0; i < hm->pn; i++) { hm->Mass[i] = rho * dr * dr * dr; hm->Smooth[i] = 1.2 * dr; hm->Sound[i] = c0; } hRun.minX = -2.5; hRun.maxX = 2.5; hRun.minY = -2.5; hRun.maxY = 2.5; hRun.minZ = -2.5; hRun.maxZ = 2.5; hRun.dt = dr / c0; hRun.tsn = 2000 * q; hRun.ssi = 40 * q; hGrid.oX = hRun.minX; hGrid.oY = hRun.minY; hGrid.oZ = hRun.minZ; hGrid.size = 2.0f * 1.2f * dr; hGrid.nX = (int) ((hRun.maxX - hRun.minX) / hGrid.size) +1; hGrid.nY = (int) ((hRun.maxY - hRun.minY) / hGrid.size) +1; hGrid.nZ = (int) ((hRun.maxZ - hRun.minZ) / hGrid.size) +1; hLoad[0].minX = hRun.minX; hLoad[0].maxX = hRun.maxX; hLoad[0].minY = hRun.minY; hLoad[0].maxY = hRun.maxY; hLoad[0].minZ = hRun.minZ; hLoad[0].maxZ = hRun.maxZ; hLoad[0].gy = -9.81f; printf("Dam break\n"); printf("Particles: %i \n", hm->pn); printf("Grid %i\n", hGrid.nX * hGrid.nY * hGrid.nZ); } int iSort(int *array, int *perm, int n) { int i; static int* dummy = NULL; if (!dummy) dummy = (int *) malloc(MAXP * sizeof(int)); for (i = 0; i < n; i++) dummy[i] = array[i]; for (i = 0; i < n; i++) array[i] = dummy[perm[i]]; return 0; } int fSort(float *array, int *perm, int n) { int i; static float* dummy = NULL; if (!dummy) dummy = (float *) malloc(MAXP * sizeof(float)); for (i = 0; i < n; i++) dummy[i] = array[i]; for (i = 0; i < n; i++) array[i] = dummy[perm[i]]; return 0; } int mapCompare(const void *a, const void *b) { int c; struct pair m1, m2; c = 
0; m1 = *(struct pair*)a; m2 = *(struct pair*)b; if (m1.key < m2.key) c = -1; if (m1.key > m2.key) c = 1; return c; } void updateHashHost(const int pn, const float* PosX, const float* PosY, const float* PosZ, int* Hash, const struct grid Grid) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ int ip, ix, iy, iz, ic; for (ip = 0; ip < pn; ip++) { ix = (int) truncf((PosX[ip] - Grid.oX) / Grid.size); iy = (int) truncf((PosY[ip] - Grid.oY) / Grid.size); iz = (int) truncf((PosZ[ip] - Grid.oZ) / Grid.size); ic = ix + iy * Grid.nX + iz * Grid.nX * Grid.nY; if (ic < 0) ic = 0; if (ic >= Grid.nX * Grid.nY * Grid.nZ) ic = Grid.nX * Grid.nY * Grid.nZ -1; Hash[ip] = ic; } } __global__ void updateHashDevice(const int pn, const float* PosX, const float* PosY, const float* PosZ, int* Hash, const struct grid Grid) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ int ip, ix, iy, iz, ic; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip < pn) { ix = (int) truncf((PosX[ip] - Grid.oX) / Grid.size); iy = (int) truncf((PosY[ip] - Grid.oY) / Grid.size); iz = (int) truncf((PosZ[ip] - Grid.oZ) / Grid.size); ic = ix + iy * Grid.nX + iz * Grid.nX * Grid.nY; if (ic < 0) ic = 0; if (ic >= Grid.nX * Grid.nY * Grid.nZ) ic = Grid.nX * Grid.nY * Grid.nZ -1; Hash[ip] = ic; } } void checkOutHost(const int pn, const float* PosX, const float* PosY, const float* PosZ, int* Hash, const struct grid Grid) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ float dn, dr; int ip, i; for (ip = 0; ip < pn; ip++) { for (i = 0; i < 10; i++) { dn = 0.0f; dn += (PosX[ip] - hOut[i].oX) * hOut[i].nX; dn += (PosY[ip] - hOut[i].oY) * hOut[i].nY; dn += (PosZ[ip] - hOut[i].oZ) * hOut[i].nZ; dr = 0.0f; dr += powf((PosX[ip] - hOut[i].oX) - dn * hOut[i].nX, 2); dr += powf((PosY[ip] - hOut[i].oY) - dn * hOut[i].nY, 2); dr += powf((PosZ[ip] - hOut[i].oZ) - dn * hOut[i].nZ, 2); dr = sqrtf(dr); if ((dn < 0.0f) && (dr < hOut[i].R)) { Hash[ip] = Grid.nX * Grid.nY * Grid.nZ; } } if ((PosX[ip] > hRun.maxX) || (PosX[ip] < hRun.minX) || (PosY[ip] > hRun.maxY) || (PosY[ip] < hRun.minY) || (PosZ[ip] > hRun.maxZ) || (PosZ[ip] < hRun.minZ)) Hash[ip] = Grid.nX * Grid.nY * Grid.nZ; } } __global__ void checkOutDevice(const int pn, const float* PosX, const float* PosY, const float* PosZ, int* Hash, const struct grid Grid) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ float dn, dr; int ip, i; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip < pn) { for (i = 0; i < 10; i++) { dn = 0.0f; dn += (PosX[ip] - dOut[i].oX) * dOut[i].nX; dn += (PosY[ip] - dOut[i].oY) * dOut[i].nY; dn += (PosZ[ip] - dOut[i].oZ) * dOut[i].nZ; dr = 0.0f; dr += powf((PosX[ip] - dOut[i].oX) - dn * dOut[i].nX, 2); dr += powf((PosY[ip] - dOut[i].oY) - dn * dOut[i].nY, 2); dr += powf((PosZ[ip] - dOut[i].oZ) - dn * dOut[i].nZ, 2); dr = sqrtf(dr); if ((dn < 0.0f) && (dr < dOut[i].R)) { Hash[ip] = Grid.nX * Grid.nY * Grid.nZ; } } if ((PosX[ip] > dRun.maxX) || (PosX[ip] < dRun.minX) || (PosY[ip] > dRun.maxY) || (PosY[ip] < dRun.minY) || (PosZ[ip] > dRun.maxZ) || (PosZ[ip] < dRun.minZ)) Hash[ip] = Grid.nX * Grid.nY * Grid.nZ; } } void inletHost(struct model *hm) { int i, iu, iv, n, p; float3 u, v, w, r; int material[MAXPI]; float mass[MAXPI], smooth[MAXPI]; float posX[MAXPI], posY[MAXPI], posZ[MAXPI]; float velX[MAXPI], velY[MAXPI], velZ[MAXPI]; float density[MAXPI], energy[MAXPI]; p = 0; for (i = 0; i < 10; i++) if (hIn[i].Material != 0) { hIn[i].Distance += hIn[i].Velocity 
* hRun.dt; if (hIn[i].Distance > hIn[i].Smooth / 1.2f) { hIn[i].Distance = hIn[i].Distance - hIn[i].Smooth / 1.2f; w = make_float3(hIn[i].nX, hIn[i].nY, hIn[i].nZ); w = normalize(w); if ((w.x <= w.y) && (w.x <= w.z)) u = make_float3(1.f, 0.f, 0.f); if ((w.y <= w.x) && (w.y <= w.z)) u = make_float3(0.f, 1.f, 0.f); if ((w.z <= w.x) && (w.z <= w.y)) u = make_float3(0.f, 0.f, 1.f); v = cross(w, u); n = roundf(1.2f * hIn[i].R / hIn[i].Smooth); for (iv = -n; iv <= n; iv++) { for (iu = -n; iu <= n; iu++) { r = iu * u + iv * v; r *= hIn[i].Smooth / 1.2f; if (length(r) < hIn[i].R) { material[p] = hIn[i].Material; mass[p] = hIn[i].Mass; smooth[p] = hIn[i].Smooth; posX[p] = hIn[i].oX + r.x; posY[p] = hIn[i].oY + r.y; posZ[p] = hIn[i].oZ + r.z; velX[p] = hIn[i].Velocity * w.x; velY[p] = hIn[i].Velocity * w.y; velZ[p] = hIn[i].Velocity * w.z; density[p] = hIn[i].Density; energy[p] = hIn[i].Energy; p++; } } } } } hipMemcpy(hm->Material + hm->pn, material, (p * sizeof(int)), hipMemcpyHostToHost); hipMemcpy(hm->Mass + hm->pn, mass, (p * sizeof(float)), hipMemcpyHostToHost); hipMemcpy(hm->Smooth + hm->pn, smooth, (p * sizeof(float)), hipMemcpyHostToHost); hipMemcpy(hm->PosX + hm->pn, posX, (p * sizeof(float)), hipMemcpyHostToHost); hipMemcpy(hm->PosY + hm->pn, posY, (p * sizeof(float)), hipMemcpyHostToHost); hipMemcpy(hm->PosZ + hm->pn, posZ, (p * sizeof(float)), hipMemcpyHostToHost); hipMemcpy(hm->VelX + hm->pn, velX, (p * sizeof(float)), hipMemcpyHostToHost); hipMemcpy(hm->VelY + hm->pn, velY, (p * sizeof(float)), hipMemcpyHostToHost); hipMemcpy(hm->VelZ + hm->pn, velZ, (p * sizeof(float)), hipMemcpyHostToHost); hipMemcpy(hm->Density + hm->pn, density, (p * sizeof(float)), hipMemcpyHostToHost); hipMemcpy(hm->Energy + hm->pn, energy, (p * sizeof(float)), hipMemcpyHostToHost); hm->pn += p; } void inletDevice(struct model *dm) { int i, iu, iv, n, p; float3 u, v, w, r; int material[MAXPI]; float mass[MAXPI], smooth[MAXPI]; float posX[MAXPI], posY[MAXPI], posZ[MAXPI]; float velX[MAXPI], velY[MAXPI], velZ[MAXPI]; float density[MAXPI], energy[MAXPI]; p = 0; for (i = 0; i < 10; i++) if (hIn[i].Material != 0) { hIn[i].Distance += hIn[i].Velocity * hRun.dt; if (hIn[i].Distance > hIn[i].Smooth / 1.2f) { hIn[i].Distance = hIn[i].Distance - hIn[i].Smooth / 1.2f; w = make_float3(hIn[i].nX, hIn[i].nY, hIn[i].nZ); w = normalize(w); if ((fabsf(w.x) <= fabsf(w.y)) && (fabsf(w.x) <= fabsf(w.z))) u = make_float3(1.f, 0.f, 0.f); if ((fabsf(w.y) <= fabsf(w.x)) && (fabsf(w.y) <= fabsf(w.z))) u = make_float3(0.f, 1.f, 0.f); if ((fabsf(w.z) <= fabsf(w.x)) && (fabsf(w.z) <= fabsf(w.y))) u = make_float3(0.f, 0.f, 1.f); v = cross(w, u); n = roundf(1.2f * hIn[i].R / hIn[i].Smooth); for (iv = -n; iv <= n; iv++) { for (iu = -n; iu <= n; iu++) { r = iu * u + iv * v; r *= hIn[i].Smooth / 1.2f; if (length(r) < hIn[i].R) { material[p] = hIn[i].Material; mass[p] = hIn[i].Mass; smooth[p] = hIn[i].Smooth; posX[p] = hIn[i].oX + r.x; posY[p] = hIn[i].oY + r.y; posZ[p] = hIn[i].oZ + r.z; velX[p] = hIn[i].Velocity * w.x; velY[p] = hIn[i].Velocity * w.y; velZ[p] = hIn[i].Velocity * w.z; density[p] = hIn[i].Density; energy[p] = hIn[i].Energy; p++; } } } } } hipMemcpy(dm->Material + dm->pn, material, (p * sizeof(int)), hipMemcpyHostToDevice); hipMemcpy(dm->Mass + dm->pn, mass, (p * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->Smooth + dm->pn, smooth, (p * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->PosX + dm->pn, posX, (p * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->PosY + dm->pn, posY, (p * sizeof(float)), 
hipMemcpyHostToDevice); hipMemcpy(dm->PosZ + dm->pn, posZ, (p * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->VelX + dm->pn, velX, (p * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->VelY + dm->pn, velY, (p * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->VelZ + dm->pn, velZ, (p * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->Density + dm->pn, density, (p * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(dm->Energy + dm->pn, energy, (p * sizeof(float)), hipMemcpyHostToDevice); dm->pn += p; } void updateSetsHost(const int pn, int *SetStart, int *SetStop, const int* Hash) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ int ip; int hash, nextHash, prevHash; for (ip = 0; ip < pn; ip++) { hash = Hash[ip]; if (ip == 0) prevHash = -1; else prevHash = Hash[ip -1]; if (ip == pn -1) nextHash = -1; else nextHash = Hash[ip +1]; if (hash != prevHash) SetStart[hash] = ip; if (hash != nextHash) SetStop[hash] = ip +1; } } __global__ void updateSetsDevice(const int pn, int *SetStart, int *SetStop, const int* Hash) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ __shared__ int prevHash[THREADS]; __shared__ int nextHash[THREADS]; int ip; int hash; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip >= pn) return; hash = Hash[ip]; if (threadIdx.x < THREADS -1) prevHash[threadIdx.x +1] = hash; if (threadIdx.x > 0) nextHash[threadIdx.x -1] = hash; if (threadIdx.x == 0) { if (ip == 0) prevHash[threadIdx.x] = -1; else prevHash[threadIdx.x] = Hash[ip -1]; } if (threadIdx.x == THREADS -1) { if (ip == pn -1) nextHash[threadIdx.x] = -1; else nextHash[threadIdx.x] = Hash[ip +1]; } __syncthreads(); if (hash != prevHash[threadIdx.x]) SetStart[hash] = ip; if (hash != nextHash[threadIdx.x]) SetStop[hash] = ip +1; } void updateListHost(const int pn, int *List, const int* SetStart, const int* SetStop, const float* Smooth, const float* PosX, const float* PosY, const float* PosZ, const struct grid Grid) { int ip, ic, ix, iy, iz, i, j, k, jp, jc, np; float dx, dy, dz, dr; // Particles list is filled for (ip = 0; ip < pn; ip++) { ix = (int) ((PosX[ip] - Grid.oX) / Grid.size); iy = (int) ((PosY[ip] - Grid.oY) / Grid.size); iz = (int) ((PosZ[ip] - Grid.oZ) / Grid.size); ic = ix + iy * Grid.nX + iz * Grid.nX * Grid.nY; np = 0; for (k = -1; k <= 1; k++) { for (j = -1; j <= 1; j++) { for (i = -1; i <= 1; i++) { jc = ic + i + j * Grid.nX + k * Grid.nX * Grid.nY; if (jc >= 0 && jc <= Grid.nX * Grid.nY * Grid.nZ) { for (jp = SetStart[jc]; jp < SetStop[jc]; jp++) { dx = PosX[ip] - PosX[jp]; dy = PosY[ip] - PosY[jp]; dz = PosZ[ip] - PosZ[jp]; dr = sqrtf(dx * dx + dy * dy + dz * dz); if ((dr < 2.0f * Smooth[ip]) && (np < MAXN)) { List[ip * MAXN + np] = jp; np++; } } } } } } while (np < MAXN) { List[ip * MAXN + np] = ip; np++; } } } __global__ void updateListDevice(const int pn, int *List, const int* SetStart, const int* SetStop, const float* Smooth, const float* PosX, const float* PosY, const float* PosZ, const struct grid Grid) { int ip, ic, ix, iy, iz, i, j, k, jp, jc, np; float dx, dy, dz, dr; // Particles list is filled ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip >= pn) return; ix = (int) ((PosX[ip] - Grid.oX) / Grid.size); iy = (int) ((PosY[ip] - Grid.oY) / Grid.size); iz = (int) ((PosZ[ip] - Grid.oZ) / Grid.size); ic = ix + iy * Grid.nX + iz * Grid.nX * Grid.nY; np = 0; for (k = -1; k <= 1; k++) { for (j = -1; j <= 1; j++) { for (i = -1; i <= 1; i++) { jc = ic + i + j * Grid.nX + k * Grid.nX * Grid.nY; if (jc >= 0 && jc <= Grid.nX 
* Grid.nY * Grid.nZ) { for (jp = SetStart[jc]; jp < SetStop[jc]; jp++) { dx = PosX[ip] - PosX[jp]; dy = PosY[ip] - PosY[jp]; dz = PosZ[ip] - PosZ[jp]; dr = sqrtf(dx * dx + dy * dy + dz * dz); if ((dr < 2.0f * Smooth[ip]) && (np < MAXN)) { List[ip * MAXN + np] = jp; np++; } } } } } } while (np < MAXN) { List[ip * MAXN + np] = ip; np++; } } int neighbourListHost(struct model *hm) { struct pair map[MAXP]; int i, ip, pout; updateHashHost(hm->pn, hm->PosX, hm->PosY, hm->PosZ, hm->Hash, hGrid); checkOutHost(hm->pn, hm->PosX, hm->PosY, hm->PosZ, hm->Hash, hGrid); for (ip = 0; ip < hm->pn; ip++) hm->Index[ip] = ip; for (ip = 0; ip < hm->pn; ip++) { map[ip].key = hm->Hash[ip]; map[ip].value = hm->Index[ip]; } qsort(map, hm->pn, sizeof(struct pair), mapCompare); for (ip = 0; ip < hm->pn; ip++) { hm->Hash[ip] = map[ip].key; hm->Index[ip] = map[ip].value; } iSort(hm->Material, hm->Index, hm->pn); fSort(hm->Mass, hm->Index, hm->pn); fSort(hm->Smooth, hm->Index, hm->pn); fSort(hm->PosX, hm->Index, hm->pn); fSort(hm->PosY, hm->Index, hm->pn); fSort(hm->PosZ, hm->Index, hm->pn); fSort(hm->VelX, hm->Index, hm->pn); fSort(hm->VelY, hm->Index, hm->pn); fSort(hm->VelZ, hm->Index, hm->pn); fSort(hm->Density, hm->Index, hm->pn); fSort(hm->Energy, hm->Index, hm->pn); fSort(hm->Pressure, hm->Index, hm->pn); fSort(hm->Sound, hm->Index, hm->pn); pout = 0; for (ip = 0; ip < hm->pn; ip++) if (hm->Hash[ip] == hGrid.nX * hGrid.nY * hGrid.nZ) pout++; hm->pn -= pout; for (i = 0; i < hGrid.nX * hGrid.nY * hGrid.nZ; i++) hm->SetStart[i] = 0; for (i = 0; i < hGrid.nX * hGrid.nY * hGrid.nZ; i++) hm->SetStop[i] = 0; updateSetsHost(hm->pn, hm->SetStart, hm->SetStop, hm->Hash); updateListHost(hm->pn, hm->List, hm->SetStart, hm->SetStop, hm->Smooth, hm->PosX, hm->PosY, hm->PosZ, hGrid); return 0; } int neighbourListDevice(struct model *dm) { int pout; int blocks, threads; blocks = (dm->pn + THREADS - 1) / THREADS; threads = THREADS; thrust::device_ptr<int> tIndex(dm->Index); thrust::device_ptr<int> tHash(dm->Hash); thrust::device_ptr<int> tMaterial(dm->Material); thrust::device_ptr<float> tMass(dm->Mass); thrust::device_ptr<float> tSmooth(dm->Smooth); thrust::device_ptr<float> tPosX(dm->PosX); thrust::device_ptr<float> tPosY(dm->PosY); thrust::device_ptr<float> tPosZ(dm->PosZ); thrust::device_ptr<float> tVelX(dm->VelX); thrust::device_ptr<float> tVelY(dm->VelY); thrust::device_ptr<float> tVelZ(dm->VelZ); thrust::device_ptr<float> tDensity(dm->Density); thrust::device_ptr<float> tEnergy(dm->Energy); thrust::device_ptr<float> tPressure(dm->Pressure); thrust::device_ptr<float> tSound(dm->Sound); thrust::device_ptr<int> tIntDummy(dm->IntDummy); thrust::device_ptr<float> tFloatDummy(dm->FloatDummy); thrust::device_ptr<int> tSetStart(dm->SetStart); thrust::device_ptr<int> tSetStop(dm->SetStop); hipLaunchKernelGGL(( updateHashDevice) , dim3(blocks), dim3(threads) , 0, 0, dm->pn, dm->PosX, dm->PosY, dm->PosZ, dm->Hash, dGrid); hipLaunchKernelGGL(( checkOutDevice) , dim3(blocks), dim3(threads) , 0, 0, dm->pn, dm->PosX, dm->PosY, dm->PosZ, dm->Hash, dGrid); thrust::sequence(tIndex, tIndex + dm->pn, 0); thrust::sort_by_key(tHash, tHash + dm->pn, tIndex); thrust::copy(tMaterial, tMaterial + dm->pn, tIntDummy); thrust::gather(tIndex, tIndex + dm->pn, tIntDummy, tMaterial); thrust::copy(tMass, tMass + dm->pn, tFloatDummy); thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tMass); thrust::copy(tSmooth, tSmooth + dm->pn, tFloatDummy); thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tSmooth); thrust::copy(tPosX, tPosX + dm->pn, 
tFloatDummy); thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tPosX); thrust::copy(tPosY, tPosY + dm->pn, tFloatDummy); thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tPosY); thrust::copy(tPosZ, tPosZ + dm->pn, tFloatDummy); thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tPosZ); thrust::copy(tVelX, tVelX + dm->pn, tFloatDummy); thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tVelX); thrust::copy(tVelY, tVelY + dm->pn, tFloatDummy); thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tVelY); thrust::copy(tVelZ, tVelZ + dm->pn, tFloatDummy); thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tVelZ); thrust::copy(tDensity, tDensity + dm->pn, tFloatDummy); thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tDensity); thrust::copy(tEnergy, tEnergy + dm->pn, tFloatDummy); thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tEnergy); thrust::copy(tPressure, tPressure + dm->pn, tFloatDummy); thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tPressure); thrust::copy(tSound, tSound + dm->pn, tFloatDummy); thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tSound); pout = thrust::count(tHash, tHash + dm->pn, dGrid.nX * dGrid.nY * dGrid.nZ); dm->pn -= pout; thrust::fill(tSetStart, tSetStart + dGrid.nX * dGrid.nY * dGrid.nZ, 0); thrust::fill(tSetStop, tSetStop + dGrid.nX * dGrid.nY * dGrid.nZ, 0); hipLaunchKernelGGL(( updateSetsDevice) , dim3(blocks), dim3(threads) , 0, 0, dm->pn, dm->SetStart, dm->SetStop, dm->Hash); hipLaunchKernelGGL(( updateListDevice) , dim3(blocks), dim3(threads) , 0, 0, dm->pn, dm->List, dm->SetStart, dm->SetStop, dm->Smooth, dm->PosX, dm->PosY, dm->PosZ, dGrid); return 0; } int RKstepHost(struct model *hm, float alpha) { int ip; for (ip = 0; ip < hm->pn; ip++) { hm->VelDotX[ip] = 0.0f; hm->VelDotY[ip] = 0.0f; hm->VelDotZ[ip] = 0.0f; hm->DensityDot[ip] = 0.0f; hm->EnergyDot[ip] = 0.0f; } // External loads updateLoadsHost(hm->pn, hm->Material, hm->PosX, hm->PosY, hm->PosZ, hm->VelX, hm->VelY, hm->VelZ, hm->VelDotX, hm->VelDotY, hm->VelDotZ, hm->EnergyDot); // External forces //updateForcesHost(); // Calculate particle interactions balanceMassMomentumHost(hm->pn, hm->List, hm->Material, hm->Mass, hm->Smooth, hm->PosX, hm->PosY, hm->PosZ, hm->VelX, hm->VelY, hm->VelZ, hm->Density, hm->Pressure, hm->Sound, hm->DensityDot, hm->VelDotX, hm->VelDotY, hm->VelDotZ); //balanceMassMomentumHostOld(); balanceEnergyHost(hm->pn, hm->Pressure, hm->Density, hm->DensityDot, hm->EnergyDot); //balanceEnergyHostOld(); // Update particles updateParticlesHost(hm->pn, alpha, hm->Material, hm->VelDotX, hm->VelDotY, hm->VelDotZ, hm->DensityDot, hm->EnergyDot, hm->PosX0, hm->PosY0, hm->PosZ0, hm->VelX0, hm->VelY0, hm->VelZ0, hm->Density0, hm->Energy0, hm->PosX, hm->PosY, hm->PosZ, hm->VelX, hm->VelY, hm->VelZ, hm->Density, hm->Energy, hm->Pressure, hm->Sound); //updateParticlesHostOld(alpha); return 0; } int RKstepDevice(struct model *dm, float alpha) { int blocks, threads; blocks = (dm->pn + THREADS - 1) / THREADS; threads = THREADS; thrust::device_ptr<float> tVelDotX(dm->VelDotX); thrust::device_ptr<float> tVelDotY(dm->VelDotY); thrust::device_ptr<float> tVelDotZ(dm->VelDotZ); thrust::device_ptr<float> tDensityDot(dm->DensityDot); thrust::device_ptr<float> tEnergyDot(dm->EnergyDot); thrust::fill(tVelDotX, tVelDotX + dm->pn, 0.0f); thrust::fill(tVelDotY, tVelDotY + dm->pn, 0.0f); thrust::fill(tVelDotZ, tVelDotZ + dm->pn, 0.0f); thrust::fill(tDensityDot, tDensityDot + dm->pn, 0.0f); thrust::fill(tEnergyDot, tEnergyDot + dm->pn, 0.0f); // External loads hipLaunchKernelGGL(( 
updateLoadsDevice) , dim3(blocks), dim3(threads) , 0, 0, dm->pn, dm->Material, dm->PosX, dm->PosY, dm->PosZ, dm->VelDotX, dm->VelDotY, dm->VelDotZ, dm->EnergyDot); // Calculate particle interactions hipLaunchKernelGGL(( balanceMassMomentumDevice) , dim3(blocks), dim3(threads) , 0, 0, dm->pn, dm->List, dm->Material, dm->Mass, dm->Smooth, dm->PosX, dm->PosY, dm->PosZ, dm->VelX, dm->VelY, dm->VelZ, dm->Density, dm->Pressure, dm->Sound, dm->DensityDot, dm->VelDotX, dm->VelDotY, dm->VelDotZ); hipLaunchKernelGGL(( balanceEnergyDevice) , dim3(blocks), dim3(threads) , 0, 0, dm->pn, dm->Pressure, dm->Density, dm->DensityDot, dm->EnergyDot); // Update particles hipLaunchKernelGGL(( updateParticlesDevice) , dim3(blocks), dim3(threads) , 0, 0, dm->pn, alpha, dm->Material, dm->VelDotX, dm->VelDotY, dm->VelDotZ, dm->DensityDot, dm->EnergyDot, dm->PosX0, dm->PosY0, dm->PosZ0, dm->VelX0, dm->VelY0, dm->VelZ0, dm->Density0, dm->Energy0, dm->PosX, dm->PosY, dm->PosZ, dm->VelX, dm->VelY, dm->VelZ, dm->Density, dm->Energy, dm->Pressure, dm->Sound); return 0; } int RKintegrateHost(struct model *hm) { /** * \brief Runge Kutta 3rd order time integration * * Integrate the Navier Stokes equations in time with the * Total Variation Diminishing Runge-Kutta algorithm of the 3rd order * * \date Dec 20, 2010 * \author Luca Massidda */ int ts; // TIME CYCLE for (ts = 0; ts <= hRun.tsn; ts++) { // Output data if ((ts % hRun.ssi) == 0) { printf("Saving time: %g \n", ts * hRun.dt); printf("Particles: %i \n", hm->pn); printData(hm); outputVTK(hm, ts / hRun.ssi); } // Inlet conditions inletHost(hm); // Calculate neighbour list neighbourListHost(hm); // Save initial condition backupDataHost(hm); // Step 1 RKstepHost(hm, 1.0); // Step 2 RKstepHost(hm, 1.0 / 4.0); // Step 3 RKstepHost(hm, 2.0 / 3.0); } return 0; } int RKintegrateDevice(struct model *hm, struct model *dm) { /** * \brief Runge Kutta 3rd order time integration * * Integrate the Navier Stokes equations in time with the * Total Variation Diminishing Runge-Kutta algorithm of the 3rd order * * \date Dec 20, 2010 * \author Luca Massidda */ int ts; // TIME CYCLE for (ts = 0; ts <= hRun.tsn; ts++) { // Output data if ((ts % hRun.ssi) == 0) { copyDeviceToHost(dm, hm); printf("Saving time: %g \n", ts * hRun.dt); printf("Particles: %i \n", hm->pn); printData(hm); outputVTK(hm, ts / hRun.ssi); } // Inlet conditions inletDevice(dm); // Calculate neighbour list neighbourListDevice(dm); // Save initial condition backupDataDevice(dm); // Step 1 RKstepDevice(dm, 1.0); // Step 2 RKstepDevice(dm, 1.0 / 4.0); // Step 3 RKstepDevice(dm, 2.0 / 3.0); } return 0; } int main() { /** * \brief armando2D v2.0 * * An SPH code for non stationary fluid dynamics. * This is the reviewed and improved C version of Armando v1.0 * developed at CERN in 2008 * * \date Oct 20, 2010 * \author Luca Massidda */ struct model hModel, dModel; int i; initHost(&hModel); for (i = 0; i < 10; i++) { hLoad[i].gx = 0.0f; hLoad[i].gy = 0.0f; hLoad[i].gz = 0.0f; hLoad[i].w = 0.0f; hOut[i].nX = 0.0f; hOut[i].nY = 0.0f; hOut[i].nZ = 0.0f; } //initBox(&hModel); //initBath(&hModel); //initDamBreak(&hModel); initChannel(&hModel); initDevice(&dModel); copyHostToDevice(&hModel, &dModel); RKintegrateDevice(&hModel, &dModel); //RKintegrateHost(&hModel); return 0; }
c15bd6e880450fa50537fd536a9d6576f0c8342f.cu
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <cutil_math.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/sort.h> #include <thrust/sequence.h> #include <thrust/count.h> #define PI 3.14159f #define MAXP 40960 #define MAXPI 4096 #define MAXN 96 #define MAXG 409600 #define THREADS 256 struct pair { int key; int value; }; struct grid { float oX, oY, oZ; float size; int nX, nY, nZ; }; struct simulation { float minX, maxX; float minY, maxY; float minZ, maxZ; float dt; int tsn; int ssi; int nsi; }; struct load { float minX, maxX; float minY, maxY; float minZ, maxZ; float gx; float gy; float gz; float w; }; struct fix { float minX, maxX; float minY, maxY; float minZ, maxZ; float velX, velY, velZ; }; struct outlet { float oX, oY, oZ; float nX, nY, nZ; float R; }; struct inlet { float oX, oY, oZ; float nX, nY, nZ; float R; int Material; float Mass, Smooth; float Velocity; float Density, Energy; float Distance; }; struct model { int pn; int* Material; float* Mass; float* Smooth; float* PosX; float* PosY; float* PosZ; float* VelX; float* VelY; float* VelZ; float* Density; float* Energy; float* Pressure; float* Sound; float* VelDotX; float* VelDotY; float* VelDotZ; float* DensityDot; float* EnergyDot; float* PosX0; float* PosY0; float* PosZ0; float* VelX0; float* VelY0; float* VelZ0; float* Density0; float* Energy0; int* List; int* Hash; int* Index; int* SetStart; int* SetStop; int* IntDummy; float* FloatDummy; }; // Host Variables int hMatType[10]; float hMatProp[10][10]; struct simulation hRun; struct grid hGrid; struct load hLoad[10]; struct fix hFix[10]; struct outlet hOut[10]; struct inlet hIn[10]; // Device Variables __device__ __constant__ int dMatType[10]; __device__ __constant__ float dMatProp[10][10]; __device__ __constant__ struct simulation dRun; __device__ struct grid dGrid; __device__ __constant__ struct load dLoad[10]; __device__ __constant__ struct fix dFix[10]; __device__ __constant__ struct outlet dOut[10]; __device__ __constant__ struct inlet dIn[10]; __host__ __device__ float kernelWendland(float r, float h) { float q, alpha, w; /** * \brief Wendland kernel * * \date Feb 8, 2011 * \author Luca Massidda */ q = r / h; // for 3D alpha = 15.0f / (16.0f * PI * h * h * h); // for 2D //alpha = 7.0f / (4.0f * PI * h * h); w = 0.0f; if (q < 2) { w = powf((1.0f - 0.5f*q),4); w *= 1.0f + 2.0f*q; w *= alpha; } return w; } __host__ __device__ float kernelDerivWendland(float r, float h) { float q, alpha, dwdr; /** * \brief Wendland kernel derivative * * \date Feb 8, 2011 * \author Luca Massidda */ q = r / h; // for 3D alpha = 15.0f / (16.0f * PI * h * h * h); // for 2D //alpha = 7.0f / (4.0f * PI * h * h); dwdr = 0.0f; if (q < 2) { dwdr = 5.0f / 8.0f * q * powf((q - 2.0f), 3) ; dwdr *= alpha / h; } return dwdr; } float pressureGasHost(int mat ,float rho, float u) { /** * \brief Ideal gas Equation Of State * * p = (k -1) rho u * c = (k(k -1) u)^0.5 * * k = dMatProp[mat][1] * pshift = dMatProp[mat][2] * * \date Jun 10, 2010 * \author Luca Massidda */ float p; // float c; p = (hMatProp[mat][1] - 1.0) * rho * u; p += hMatProp[mat][2]; // c = sqrtf(hMatProp[mat][1] * (hMatProp[mat][1] - 1.0) * u); return p; } float pressurePolyHost(int mat , float rho, float u) { /** * \brief Mie-Gruneisen polynomial Equation Of State * * p = a1 mu + a2 mu^2 + a3 mu^3 + (b0 + b1 mu) rho0 u in compression * p = t1 mu + t2 mu^2 + b0 rho0 u in tension * * rho0 = dMatProp[mat][0]; * a1 = dMatProp[mat][1]; * a2 = dMatProp[mat][2]; * a3 = 
dMatProp[mat][3]; * b0 = dMatProp[mat][4]; * b1 = dMatProp[mat][5]; * t1 = dMatProp[mat][6]; * t2 = dMatProp[mat][7]; * pmin = dMatProp[mat][8]; * * \date Jun 10, 2010 * \author Luca Massidda */ float mu; float p; // float c; mu = (rho - hMatProp[mat][0]) / hMatProp[mat][0]; if (mu < 0) p = (hMatProp[mat][6] * mu + hMatProp[mat][7] * mu*mu) + (hMatProp[mat][4] * hMatProp[mat][0] * u); else p = (hMatProp[mat][1] * mu + hMatProp[mat][2] * mu*mu + hMatProp[mat][3] * mu*mu*mu) + ((hMatProp[mat][4] + hMatProp[mat][5] * mu) * hMatProp[mat][0] * u); if (p < hMatProp[mat][8]) p = hMatProp[mat][8]; // c = sqrtf(hMatProp[mat][1] / rho); return p; } float pressureShockHost(int mat, float rho, float u) { /** * \brief Mie-Gruneisen Shock Hugoniot Equation Of State * * mu = rho / rho0 -1 * g = g * rho0 / rho * ph = (rho0 c0^2 mu (1 + mu)) / (1 - (s0 - 1) * mu)^2 * uh = 1/2 ph/rho0 * (mu / (1 + mu)) * p = ph + g * rho * (u - uh) * * rho0 = dMatProp[mat][0]; * c0 = dMatProp[mat][1]; * g0 = dMatProp[mat][2]; * s0 = dMatProp[mat][3]; * pmin = dMatProp[mat][4]; * * \date Jun 10, 2010 * \author Luca Massidda */ float mu; float p, ph; // float c; mu = (rho - hMatProp[mat][0]) / hMatProp[mat][0]; ph = (hMatProp[mat][0] * powf(hMatProp[mat][1], 2) * mu*(1.0 +mu)) / powf((1.0 - (hMatProp[mat][3] -1.0) * mu), 2); p = ph + hMatProp[mat][2] * hMatProp[mat][0] * (u - (0.5 * ph / hMatProp[mat][0] * (mu / (1.0 + mu)))); if (p < hMatProp[mat][4]) p = hMatProp[mat][4]; // c = hMatProp[mat][1]; return p; } float pressureTaitHost(int mat, float rho, float u) { /** * \brief Tait Equation Of State * * p = rho0 * c0 * c0 / 7.0 * (powf((rho / rho0), 7) - 1.0); * c = c0; * * rho0 = dMatProp[mat][0]; * c0 = dMatProp[mat][1]; * pmin = dMatProp[mat][2]; * * \date Jun 10, 2010 * \author Luca Massidda */ float p; // float c; p = hMatProp[mat][0] * powf(hMatProp[mat][1], 2) / 7.0 * (powf((rho / hMatProp[mat][0]), 7) - 1.0); if (p < hMatProp[mat][2]) p = hMatProp[mat][2]; // c = hMatProp[mat][1]; return p; } // Global code void balanceMassMomentumHost(const int pn, const int* List, const int* Material, const float* Mass, const float* Smooth, const float* PosX, const float* PosY, const float* PosZ, const float* VelX, const float* VelY, const float* VelZ, const float* Density, const float* Pressure, const float* Sound, float* DensityDot, float* VelDotX, float* VelDotY, float* VelDotZ) { /** * \brief Interate particles * * \date Jan 6, 2011 * \author Luca Massidda */ int ip, il, jp; float iDensityDot; float iVelDotX, iVelDotY, iVelDotZ; float iSmooth, jMass; float dx, dy, dz, dr, dvr, dwdr, f, w, w0; for (ip = 0; ip < pn; ip++) { iDensityDot = 0.0f; iVelDotX = 0.0f; iVelDotY = 0.0f; iVelDotZ = 0.0f; iSmooth = Smooth[ip]; for (il = 0; il < MAXN; il++) { jp = List[ip * MAXN + il]; jMass = Mass[jp]; dx = PosX[ip] - PosX[jp]; dy = PosY[ip] - PosY[jp]; dz = PosZ[ip] - PosZ[jp]; dr = sqrtf(dx * dx + dy * dy + dz * dz); if (dr < (0.01f * iSmooth)) dr = 100.0f * iSmooth; w = kernelWendland(dr, iSmooth); w0 = kernelWendland(0.0f, iSmooth); dwdr = kernelDerivWendland(dr, iSmooth); dvr = 0.0f; dvr += (PosX[ip] - PosX[jp]) * (VelX[ip] - VelX[jp]); dvr += (PosY[ip] - PosY[jp]) * (VelY[ip] - VelY[jp]); dvr += (PosZ[ip] - PosZ[jp]) * (VelZ[ip] - VelZ[jp]); iDensityDot += jMass * dvr * dwdr / dr; // Calculate interparticle pressure action //f = -(Pressure[ip] + Pressure[jp]) // / (Density[ip] * Density[jp]); f = -(Pressure[ip] / powf(Density[ip], 2) + Pressure[jp] / powf(Density[jp], 2)); iVelDotX += jMass * f * dwdr * (PosX[ip] - PosX[jp]) / dr; 
iVelDotY += jMass * f * dwdr * (PosY[ip] - PosY[jp]) / dr; iVelDotZ += jMass * f * dwdr * (PosZ[ip] - PosZ[jp]) / dr; // Calculate shock correction for mass f = Density[ip] - Density[jp]; f *= 2.0f * Sound[ip] / (Density[ip] + Density[jp]); iDensityDot += jMass * f * dwdr; // Calculate shock correction for momentum if (dvr < 0.0f) f = dvr; else f = 0.0f; f *= iSmooth / (dr * dr + 0.01f * iSmooth * iSmooth); f *= 2.0f * Sound[ip] / (Density[ip] + Density[jp]); f *= 0.03f; iVelDotX += jMass * f * dwdr * (PosX[ip] - PosX[jp]) / dr; iVelDotY += jMass * f * dwdr * (PosY[ip] - PosY[jp]) / dr; iVelDotZ += jMass * f * dwdr * (PosZ[ip] - PosZ[jp]) / dr; // Calculate boundary repulsion if (Material[ip] != Material[jp]) { f = 0.5f * w / w0 * Sound[ip] * Sound[jp] / iSmooth; iVelDotX += jMass / (Mass[ip] + jMass) * f * (PosX[ip] - PosX[jp]) / dr; iVelDotY += jMass / (Mass[ip] + jMass) * f * (PosY[ip] - PosY[jp]) / dr; iVelDotZ += jMass / (Mass[ip] + jMass) * f * (PosZ[ip] - PosZ[jp]) / dr; } } DensityDot[ip] += iDensityDot; VelDotX[ip] += iVelDotX; VelDotY[ip] += iVelDotY; VelDotZ[ip] += iVelDotZ; } } __global__ void balanceMassMomentumDevice(const int pn, const int* List, const int* Material, const float* Mass, const float* Smooth, const float* PosX, const float* PosY, const float* PosZ, const float* VelX, const float* VelY, const float* VelZ, const float* Density, const float* Pressure, const float* Sound, float* DensityDot, float* VelDotX, float* VelDotY, float* VelDotZ) { /** * \brief Interate particles * * \date Jan 6, 2011 * \author Luca Massidda */ int ip, il, jp; float iDensityDot; float iVelDotX, iVelDotY, iVelDotZ; float iSmooth, jMass; volatile float dx, dy, dz, dr, dvr, dwdr, f, w, w0, q; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip < pn) { iDensityDot = 0.0f; iVelDotX = 0.0f; iVelDotY = 0.0f; iVelDotZ = 0.0f; iSmooth = Smooth[ip]; for (il = 0; il < MAXN; il++) { jp = List[ip * MAXN + il]; jMass = Mass[jp]; dx = PosX[ip] - PosX[jp]; dy = PosY[ip] - PosY[jp]; dz = PosZ[ip] - PosZ[jp]; dr = sqrtf(dx * dx + dy * dy + dz * dz); if (dr < (0.01f * iSmooth)) dr = 100.0f * iSmooth; w = kernelWendland(dr, iSmooth); dwdr = kernelDerivWendland(dr, iSmooth); if (Material[ip] == Material[jp]) { dvr = 0.0f; dvr += (PosX[ip] - PosX[jp]) * (VelX[ip] - VelX[jp]); dvr += (PosY[ip] - PosY[jp]) * (VelY[ip] - VelY[jp]); dvr += (PosZ[ip] - PosZ[jp]) * (VelZ[ip] - VelZ[jp]); iDensityDot += jMass * dvr * dwdr / dr; // Calculate interparticle pressure action f = -(Pressure[ip] / powf(Density[ip], 2) + Pressure[jp] / powf(Density[jp], 2)); f *= jMass * dwdr; iVelDotX += f * (PosX[ip] - PosX[jp]) / dr; iVelDotY += f * (PosY[ip] - PosY[jp]) / dr; iVelDotZ += f * (PosZ[ip] - PosZ[jp]) / dr; // Calculate shock correction for mass f = Density[ip] - Density[jp]; f *= 2.0f * Sound[ip] / (Density[ip] + Density[jp]); iDensityDot += jMass * f * dwdr; // Calculate shock correction for momentum if (dvr < 0.0f) f = dvr; else f = 0.0f; f *= iSmooth / (dr * dr + 0.01f * iSmooth * iSmooth); f *= 2.0f * Sound[ip] / (Density[ip] + Density[jp]); f *= 0.03f; f *= jMass * dwdr; iVelDotX += f * (PosX[ip] - PosX[jp]) / dr; iVelDotY += f * (PosY[ip] - PosY[jp]) / dr; iVelDotZ += f * (PosZ[ip] - PosZ[jp]) / dr; } // Calculate boundary repulsion if (Material[ip] != Material[jp]) { f = 0.5f * w * Mass[jp] / Density[jp] / Smooth[jp] * powf(Sound[jp], 2); iVelDotX += f * (PosX[ip] - PosX[jp]) / dr; iVelDotY += f * (PosY[ip] - PosY[jp]) / dr; iVelDotZ += f * (PosZ[ip] - PosZ[jp]) / dr; } } DensityDot[ip] += iDensityDot; VelDotX[ip] += 
iVelDotX; VelDotY[ip] += iVelDotY; VelDotZ[ip] += iVelDotZ; } } void balanceEnergyHost(const int pn, const float* Pressure, const float* Density, const float* DensityDot, float* EnergyDot) { /** * \brief Interate particles * * \date Jan 9, 2011 * \author Luca Massidda */ int ip; float iPressure, iDensity, iDensityDot; float iEnergyDot; for (ip = 0; ip < pn; ip++) { iPressure = Pressure[ip]; iDensity = Density[ip]; iDensityDot = DensityDot[ip]; iEnergyDot = (iPressure * iDensityDot) / (iDensity * iDensity); EnergyDot[ip] += iEnergyDot; } } __global__ void balanceEnergyDevice(const int pn, const float* Pressure, const float* Density, const float* DensityDot, float* EnergyDot) { /** * \brief Interate particles * * \date Jan 9, 2011 * \author Luca Massidda */ volatile int ip; float iPressure, iDensity, iDensityDot; float iEnergyDot; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip < pn) { iPressure = Pressure[ip]; iDensity = Density[ip]; iDensityDot = DensityDot[ip]; iEnergyDot = (iPressure * iDensityDot) / (iDensity * iDensity); EnergyDot[ip] += iEnergyDot; } } __host__ __device__ float pressureGas(float* properties, float rho, float u) { /** * \brief Ideal gas Equation Of State * * p = (k -1) rho u * c = (k(k -1) u)^0.5 * * k = properties[1] * pshift = properties[2] * * \date Jun 10, 2010 * \author Luca Massidda */ float p; p = (properties[1] - 1.0f) * rho * u; p += properties[2]; return p; } __host__ __device__ float pressurePoly(float* properties, float rho, float u) { /** * \brief Mie-Gruneisen polynomial Equation Of State * * p = a1 mu + a2 mu^2 + a3 mu^3 + (b0 + b1 mu) rho0 u in compression * p = t1 mu + t2 mu^2 + b0 rho0 u in tension * * rho0 = properties[0]; * a1 = properties[1]; * a2 = properties[2]; * a3 = properties[3]; * b0 = properties[4]; * b1 = properties[5]; * t1 = properties[6]; * t2 = properties[7]; * pmin = properties[8]; * * \date Jun 10, 2010 * \author Luca Massidda */ float mu; float p; mu = (rho - properties[0]) / properties[0]; if (mu < 0) p = (properties[6] * mu + properties[7] * mu*mu) + (properties[4] * properties[0] * u); else p = (properties[1] * mu + properties[2] * mu*mu + properties[3] * mu*mu*mu) + ((properties[4] + properties[5] * mu) * properties[0] * u); //if (p < properties[8]) p = properties[8]; return p; } __host__ __device__ float pressureShock(float* properties, float rho, float u) { /** * \brief Mie-Gruneisen Shock Hugoniot Equation Of State * * mu = rho / rho0 -1 * g = g * rho0 / rho * ph = (rho0 c0^2 mu (1 + mu)) / (1 - (s0 - 1) * mu)^2 * uh = 1/2 ph/rho0 * (mu / (1 + mu)) * p = ph + g * rho * (u - uh) * * rho0 = properties[0]; * c0 = properties[1]; * g0 = properties[2]; * s0 = properties[3]; * pmin = properties[4]; * * \date Jun 10, 2010 * \author Luca Massidda */ float mu; float p, ph; mu = (rho - properties[0]) / properties[0]; ph = (properties[0] * powf(properties[1], 2) * mu*(1.0f +mu)) / powf((1.0f - (properties[3] -1.0f) * mu), 2); p = ph + properties[2] * properties[0] * (u - (0.5f * ph / properties[0] * (mu / (1.0f + mu)))); //if (p < properties[4]) p = properties[4]; return p; } __host__ __device__ float pressureTait(float* properties, float rho, float u) { /** * \brief Tait Equation Of State * * p = rho0 * c0 * c0 / 7.0 * (powf((rho / rho0), 7) - 1.0); * c = c0; * * rho0 = properties[0]; * c0 = properties[1]; * pmin = properties[2]; * * \date Jun 10, 2010 * \author Luca Massidda */ float p; p = properties[0] * powf(properties[1], 2) / 7.0f * (powf((rho / properties[0]), 7) - 1.0f); //if (p < properties[2]) p = properties[2]; return p; } 
__host__ __device__ float soundGas(float* properties ,float rho, float u) { /** * \brief Ideal gas Equation Of State * * p = (k -1) rho u * c = (k(k -1) u)^0.5 * * k = properties[1] * pshift = properties[2] * * \date Jun 10, 2010 * \author Luca Massidda */ float c; c = sqrtf(properties[1] * (properties[1] - 1.0f) * u); return c; } __host__ __device__ float soundPoly(float* properties , float rho, float u) { /** * \brief Mie-Gruneisen polynomial Equation Of State * * p = a1 mu + a2 mu^2 + a3 mu^3 + (b0 + b1 mu) rho0 u in compression * p = t1 mu + t2 mu^2 + b0 rho0 u in tension * * rho0 = properties[0]; * a1 = properties[1]; * a2 = properties[2]; * a3 = properties[3]; * b0 = properties[4]; * b1 = properties[5]; * t1 = properties[6]; * t2 = properties[7]; * pmin = properties[8]; * * \date Jun 10, 2010 * \author Luca Massidda */ float c; c = sqrtf(properties[1] / rho); return c; } __host__ __device__ float soundShock(float* properties, float rho, float u) { /** * \brief Mie-Gruneisen Shock Hugoniot Equation Of State * * mu = rho / rho0 -1 * g = g * rho0 / rho * ph = (rho0 c0^2 mu (1 + mu)) / (1 - (s0 - 1) * mu)^2 * uh = 1/2 ph/rho0 * (mu / (1 + mu)) * p = ph + g * rho * (u - uh) * * rho0 = properties[0]; * c0 = properties[1]; * g0 = properties[2]; * s0 = properties[3]; * pmin = properties[4]; * * \date Jun 10, 2010 * \author Luca Massidda */ float c; c = properties[1]; return c; } __host__ __device__ float soundTait(float* properties, float rho, float u) { /** * \brief Tait Equation Of State * * p = rho0 * c0 * c0 / 7.0 * (powf((rho / rho0), 7) - 1.0); * c = c0; * * rho0 = properties[0]; * c0 = properties[1]; * pmin = properties[2]; * * \date Jun 10, 2010 * \author Luca Massidda */ float c; c = properties[1]; return c; } __host__ __device__ float densityPoly(float* properties , float rho) { /** * \brief Mie-Gruneisen polynomial Equation Of State * * p = a1 mu + a2 mu^2 + a3 mu^3 + (b0 + b1 mu) rho0 u in compression * p = t1 mu + t2 mu^2 + b0 rho0 u in tension * * rho0 = properties[0]; * a1 = properties[1]; * a2 = properties[2]; * a3 = properties[3]; * b0 = properties[4]; * b1 = properties[5]; * t1 = properties[6]; * t2 = properties[7]; * pmin = properties[8]; * * \date Jun 10, 2010 * \author Luca Massidda */ float rho0; rho0 = properties[0]; if (rho < 0.9f * rho0) rho = 0.9f*rho0; return rho; } __host__ __device__ float densityShock(float* properties, float rho) { /** * \brief Mie-Gruneisen Shock Hugoniot Equation Of State * * mu = rho / rho0 -1 * g = g * rho0 / rho * ph = (rho0 c0^2 mu (1 + mu)) / (1 - (s0 - 1) * mu)^2 * uh = 1/2 ph/rho0 * (mu / (1 + mu)) * p = ph + g * rho * (u - uh) * * rho0 = properties[0]; * c0 = properties[1]; * g0 = properties[2]; * s0 = properties[3]; * pmin = properties[4]; * * \date Jun 10, 2010 * \author Luca Massidda */ float rho0; rho0 = properties[0]; if (rho < 0.9f * rho0) rho = 0.9f*rho0; return rho; } __host__ __device__ float densityTait(float* properties, float rho) { /** * \brief Tait Equation Of State * * p = rho0 * c0 * c0 / 7.0 * (powf((rho / rho0), 7) - 1.0); * c = c0; * * rho0 = properties[0]; * c0 = properties[1]; * pmin = properties[2]; * * \date Jun 10, 2010 * \author Luca Massidda */ float rho0; rho0 = properties[0]; if (rho < 0.9f * rho0) rho = 0.9f*rho0; return rho; } void updateParticlesHost(const int pn, const float alpha, const int* Material, const float* VelDotX, const float* VelDotY, const float* VelDotZ, const float* DensityDot, const float* EnergyDot, const float* PosX0, const float* PosY0, const float* PosZ0, const float* VelX0, const 
float* VelY0, const float* VelZ0, const float* Density0, const float* Energy0, float* PosX, float* PosY, float* PosZ, float* VelX, float* VelY, float* VelZ, float* Density, float* Energy, float* Pressure, float* Sound) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ int ip, i; int iMaterial; for (ip = 0; ip < pn; ip++) if (Material[ip] != 0) { PosX[ip] = PosX0[ip] + alpha * (PosX[ip] + hRun.dt * VelX[ip] - PosX0[ip]); PosY[ip] = PosY0[ip] + alpha * (PosY[ip] + hRun.dt * VelY[ip] - PosY0[ip]); PosZ[ip] = PosZ0[ip] + alpha * (PosZ[ip] + hRun.dt * VelZ[ip] - PosZ0[ip]); VelX[ip] = VelX0[ip] + alpha * (VelX[ip] + hRun.dt * VelDotX[ip] - VelX0[ip]); VelY[ip] = VelY0[ip] + alpha * (VelY[ip] + hRun.dt * VelDotY[ip] - VelY0[ip]); VelZ[ip] = VelZ0[ip] + alpha * (VelZ[ip] + hRun.dt * VelDotZ[ip] - VelZ0[ip]); //VelZ[ip] = 0.0f; Density[ip] = Density0[ip] + alpha * (Density[ip] + hRun.dt * DensityDot[ip] - Density0[ip]); Energy[ip] = Energy0[ip] + alpha * (Energy[ip] + hRun.dt * EnergyDot[ip] - Energy0[ip]); iMaterial = Material[ip]; if (iMaterial <= 0) { VelX[ip] = VelX0[ip]; VelY[ip] = VelY0[ip]; VelZ[ip] = VelZ0[ip]; } for (i = 0; i < 10; i++) if ((PosX[ip] > hFix[i].minX) && (PosX[ip] < hFix[i].maxX) && (PosY[ip] > hFix[i].minY) && (PosY[ip] < hFix[i].maxY) && (PosZ[ip] > hFix[i].minZ) && (PosZ[ip] < hFix[i].maxZ)) { VelX[ip] = hFix[i].velX; VelY[ip] = hFix[i].velY; VelZ[ip] = hFix[i].velZ; } iMaterial = abs(iMaterial); if (hMatType[iMaterial] == 0) { VelX[ip] = VelX0[ip]; VelY[ip] = VelY0[ip]; VelZ[ip] = VelZ0[ip]; Density[ip] = Density0[ip]; Energy[ip] = Energy0[ip]; } switch (hMatType[iMaterial]) { case (0) : // BOUNDARY Density[ip] = densityTait(hMatProp[iMaterial], Density[ip]); Pressure[ip] = 0.0f*pressureTait(hMatProp[iMaterial], Density[ip], Energy[ip]); Sound[ip] = soundTait(hMatProp[iMaterial], Density[ip], Energy[ip]); break; case (1) : // IDEAL GAS EOS Pressure[ip] = pressureGas(hMatProp[iMaterial], Density[ip], Energy[ip]); Sound[ip] = soundGas(hMatProp[iMaterial], Density[ip], Energy[ip]); break; case (2) : // MIE-GRUNEISEN POLYNOMIAL EOS Density[ip] = densityPoly(hMatProp[iMaterial], Density[ip]); Pressure[ip] = pressurePoly(hMatProp[iMaterial], Density[ip], Energy[ip]); Sound[ip] = soundPoly(hMatProp[iMaterial], Density[ip], Energy[ip]); break; case (3) : // MIE-GRUNEISEN SHOCK EOS Density[ip] = densityShock(hMatProp[iMaterial], Density[ip]); Pressure[ip] = pressureShock(hMatProp[iMaterial], Density[ip], Energy[ip]); Sound[ip] = soundShock(hMatProp[iMaterial], Density[ip], Energy[ip]); break; case (4) : // TAIT EOS Density[ip] = densityTait(hMatProp[iMaterial], Density[ip]); Pressure[ip] = pressureTait(hMatProp[iMaterial], Density[ip], Energy[ip]); Sound[ip] = soundTait(hMatProp[iMaterial], Density[ip], Energy[ip]); break; default : Pressure[ip] = 0.0f; } } } __global__ void updateParticlesDevice(const int pn, const float alpha, const int* Material, const float* VelDotX, const float* VelDotY, const float* VelDotZ, const float* DensityDot, const float* EnergyDot, const float* PosX0, const float* PosY0, const float* PosZ0, const float* VelX0, const float* VelY0, const float* VelZ0, const float* Density0, const float* Energy0, float* PosX, float* PosY, float* PosZ, float* VelX, float* VelY, float* VelZ, float* Density, float* Energy, float* Pressure, float* Sound) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ int ip, i; int iMaterial; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip < pn) { PosX[ip] = PosX0[ip] + 
alpha * (PosX[ip] + dRun.dt * VelX[ip] - PosX0[ip]); PosY[ip] = PosY0[ip] + alpha * (PosY[ip] + dRun.dt * VelY[ip] - PosY0[ip]); PosZ[ip] = PosZ0[ip] + alpha * (PosZ[ip] + dRun.dt * VelZ[ip] - PosZ0[ip]); VelX[ip] = VelX0[ip] + alpha * (VelX[ip] + dRun.dt * VelDotX[ip] - VelX0[ip]); VelY[ip] = VelY0[ip] + alpha * (VelY[ip] + dRun.dt * VelDotY[ip] - VelY0[ip]); VelZ[ip] = VelZ0[ip] + alpha * (VelZ[ip] + dRun.dt * VelDotZ[ip] - VelZ0[ip]); //VelZ[ip] = 0.0f; Density[ip] = Density0[ip] + alpha * (Density[ip] + dRun.dt * DensityDot[ip] - Density0[ip]); Energy[ip] = Energy0[ip] + alpha * (Energy[ip] + dRun.dt * EnergyDot[ip] - Energy0[ip]); iMaterial = Material[ip]; for (i = 0; i < 10; i++) if ((PosX[ip] > dFix[i].minX) && (PosX[ip] < dFix[i].maxX) && (PosY[ip] > dFix[i].minY) && (PosY[ip] < dFix[i].maxY) && (PosZ[ip] > dFix[i].minZ) && (PosZ[ip] < dFix[i].maxZ)) { VelX[ip] = dFix[i].velX; VelY[ip] = dFix[i].velY; VelZ[ip] = dFix[i].velZ; } if (dMatType[iMaterial] == 0) { VelX[ip] = VelX0[ip]; VelY[ip] = VelY0[ip]; VelZ[ip] = VelZ0[ip]; } switch (dMatType[iMaterial]) { case (0) : // BOUNDARY Density[ip] = densityTait(dMatProp[iMaterial], Density[ip]); Pressure[ip] = 0.0f*pressureTait(dMatProp[iMaterial], Density[ip], Energy[ip]); Sound[ip] = soundTait(dMatProp[iMaterial], Density[ip], Energy[ip]); break; case (1) : // IDEAL GAS EOS Pressure[ip] = pressureGas(dMatProp[iMaterial], Density[ip], Energy[ip]); Sound[ip] = soundGas(dMatProp[iMaterial], Density[ip], Energy[ip]); break; case (2) : // MIE-GRUNEISEN POLYNOMIAL EOS Density[ip] = densityPoly(dMatProp[iMaterial], Density[ip]); Pressure[ip] = pressurePoly(dMatProp[iMaterial], Density[ip], Energy[ip]); Sound[ip] = soundPoly(dMatProp[iMaterial], Density[ip], Energy[ip]); break; case (3) : // MIE-GRUNEISEN SHOCK EOS Density[ip] = densityShock(dMatProp[iMaterial], Density[ip]); Pressure[ip] = pressureShock(dMatProp[iMaterial], Density[ip], Energy[ip]); Sound[ip] = soundShock(dMatProp[iMaterial], Density[ip], Energy[ip]); break; case (4) : // TAIT EOS Density[ip] = densityTait(dMatProp[iMaterial], Density[ip]); Pressure[ip] = pressureTait(dMatProp[iMaterial], Density[ip], Energy[ip]); Sound[ip] = soundTait(dMatProp[iMaterial], Density[ip], Energy[ip]); break; default : Pressure[ip] = 0.0f; } } } void updateLoadsHost(const int pn, const int* Material, float* PosX, float* PosY, float* PosZ, float* VelX, float* VelY, float* VelZ, float* VelDotX, float* VelDotY, float* VelDotZ, float* EnergyDot) { int ip, i; for (ip = 0; ip < pn; ip++) { if (Material[ip] > 0) { for (i = 0; i < 10; i++) { if ((PosX[ip] > hLoad[i].minX) && (PosX[ip] < hLoad[i].maxX) && (PosZ[ip] < hLoad[i].maxZ) && (PosY[ip] > hLoad[i].minY) && (PosY[ip] < hLoad[i].maxY) && (PosZ[ip] > hLoad[i].minZ) && (PosZ[ip] < hLoad[i].maxZ)) { VelDotX[ip] += hLoad[i].gx; VelDotY[ip] += hLoad[i].gy; VelDotZ[ip] += hLoad[i].gz; EnergyDot[ip] += hLoad[i].w; } } } } } __global__ void updateLoadsDevice(const int pn, const int* Material, const float* PosX, const float* PosY, const float* PosZ, float* VelDotX, float* VelDotY, float* VelDotZ, float* EnergyDot) { int ip, i; ip = threadIdx.x + blockDim.x * blockIdx.x; if ((ip < pn) && (Material[ip] > 0)) { for (i = 0; i < 10; i++) { if ((PosX[ip] > dLoad[i].minX) && (PosX[ip] < dLoad[i].maxX) && (PosZ[ip] < dLoad[i].maxZ) && (PosY[ip] > dLoad[i].minY) && (PosY[ip] < dLoad[i].maxY) && (PosZ[ip] > dLoad[i].minZ) && (PosZ[ip] < dLoad[i].maxZ)) { VelDotX[ip] += dLoad[i].gx; VelDotY[ip] += dLoad[i].gy; VelDotZ[ip] += dLoad[i].gz; EnergyDot[ip] += dLoad[i].w; } } 
} } // Host code int initHost(struct model *hm) { hm->Material = (int *) malloc(MAXP * sizeof(int)); hm->Mass = (float *) malloc(MAXP * sizeof(float)); hm->Smooth = (float *) malloc(MAXP * sizeof(float)); hm->PosX = (float *) malloc(MAXP * sizeof(float)); hm->PosY = (float *) malloc(MAXP * sizeof(float)); hm->PosZ = (float *) malloc(MAXP * sizeof(float)); hm->VelX = (float *) malloc(MAXP * sizeof(float)); hm->VelY = (float *) malloc(MAXP * sizeof(float)); hm->VelZ = (float *) malloc(MAXP * sizeof(float)); hm->Density = (float *) malloc(MAXP * sizeof(float)); hm->Energy = (float *) malloc(MAXP * sizeof(float)); hm->Pressure = (float *) malloc(MAXP * sizeof(float)); hm->Sound = (float *) malloc(MAXP * sizeof(float)); hm->VelDotX = (float *) malloc(MAXP * sizeof(float)); hm->VelDotY = (float *) malloc(MAXP * sizeof(float)); hm->VelDotZ = (float *) malloc(MAXP * sizeof(float)); hm->DensityDot = (float *) malloc(MAXP * sizeof(float)); hm->EnergyDot = (float *) malloc(MAXP * sizeof(float)); hm->PosX0 = (float *) malloc(MAXP * sizeof(float)); hm->PosY0 = (float *) malloc(MAXP * sizeof(float)); hm->PosZ0 = (float *) malloc(MAXP * sizeof(float)); hm->VelX0 = (float *) malloc(MAXP * sizeof(float)); hm->VelY0 = (float *) malloc(MAXP * sizeof(float)); hm->VelZ0 = (float *) malloc(MAXP * sizeof(float)); hm->Density0 = (float *) malloc(MAXP * sizeof(float)); hm->Energy0 = (float *) malloc(MAXP * sizeof(float)); hm->Hash = (int *) malloc(MAXP * sizeof(int)); hm->Index = (int *) malloc(MAXP * sizeof(int)); hm->List = (int *) malloc(MAXP * MAXN * sizeof(int)); hm->IntDummy = (int *) malloc(MAXP * sizeof(int)); hm->FloatDummy = (float *) malloc(MAXP * sizeof(float)); hm->SetStart = (int *) malloc(MAXG * sizeof(int)); hm->SetStop = (int *) malloc(MAXG * sizeof(int)); return 0; } int initDevice(struct model *dm) { size_t available, total; cudaMalloc((void**) &(dm->Material), (MAXP * sizeof(int))); cudaMalloc((void**) &(dm->Mass), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->Smooth), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->PosX), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->PosY), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->PosZ), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->VelX), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->VelY), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->VelZ), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->Density), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->Energy), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->Pressure), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->Sound), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->VelDotX), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->VelDotY), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->VelDotZ), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->DensityDot), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->EnergyDot), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->PosX0), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->PosY0), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->PosZ0), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->VelX0), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->VelY0), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->VelZ0), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->Density0), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->Energy0), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->Hash), (MAXP * sizeof(int))); cudaMalloc((void**) &(dm->Index), (MAXP * sizeof(int))); cudaMalloc((void**) &(dm->List), 
(MAXP * MAXN * sizeof(int))); cudaMalloc((void**) &(dm->IntDummy), (MAXP * sizeof(int))); cudaMalloc((void**) &(dm->FloatDummy), (MAXP * sizeof(float))); cudaMalloc((void**) &(dm->SetStart), (MAXG * sizeof(int))); cudaMalloc((void**) &(dm->SetStop), (MAXG * sizeof(int))); cudaMemGetInfo(&available, &total); printf("Available memory %d of %d MB\n", available/1024/1024, total/1024/1024); return 0; } int copyHostToDevice(struct model *hm, struct model *dm) { dm->pn = hm->pn; cudaMemcpy(dm->Material, hm->Material, (MAXP * sizeof(int)), cudaMemcpyHostToDevice); cudaMemcpy(dm->Mass, hm->Mass, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->Smooth, hm->Smooth, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->PosX, hm->PosX, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->PosY, hm->PosY, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->PosZ, hm->PosZ, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->VelX, hm->VelX, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->VelY, hm->VelY, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->VelZ, hm->VelZ, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->Density, hm->Density, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->Energy, hm->Energy, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->Pressure, hm->Pressure, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->Sound, hm->Sound, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->VelDotX, hm->VelDotX, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->VelDotY, hm->VelDotY, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->VelDotZ, hm->VelDotZ, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->DensityDot, hm->DensityDot, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->EnergyDot, hm->EnergyDot, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->PosX0, hm->PosX0, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->PosY0, hm->PosY0, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->PosZ0, hm->PosZ0, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->VelX0, hm->VelX0, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->VelY0, hm->VelY0, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->VelZ0, hm->VelZ0, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->Density0, hm->Density0, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->Energy0, hm->Energy0, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->List, hm->List, (MAXP * MAXN * sizeof(int)), cudaMemcpyHostToDevice); cudaMemcpy(dm->Hash, hm->Hash, (MAXP * sizeof(int)), cudaMemcpyHostToDevice); cudaMemcpy(dm->Index, hm->Index, (MAXP * sizeof(int)), cudaMemcpyHostToDevice); cudaMemcpy(dm->IntDummy, hm->IntDummy, (MAXP * sizeof(int)), cudaMemcpyHostToDevice); cudaMemcpy(dm->FloatDummy, hm->FloatDummy, (MAXP * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->SetStart, hm->SetStart, (MAXG * sizeof(int)), cudaMemcpyHostToDevice); cudaMemcpy(dm->SetStop, hm->SetStop, (MAXG * sizeof(int)), cudaMemcpyHostToDevice); dGrid.oX = hGrid.oX; dGrid.oY = hGrid.oY; dGrid.oZ = hGrid.oZ; dGrid.nX = hGrid.nX; dGrid.nY = hGrid.nY; dGrid.nZ = hGrid.nZ; dGrid.size = hGrid.size; cudaMemcpyToSymbol("dMatType", hMatType, 10 * sizeof(int)); cudaMemcpyToSymbol("dMatProp", hMatProp, 100 * sizeof(float)); //cudaMemcpyToSymbol("dGrid", 
&hGrid, sizeof(struct grid)); cudaMemcpyToSymbol("dRun", &hRun, sizeof(struct simulation)); cudaMemcpyToSymbol("dLoad", &hLoad, 10 * sizeof(struct load)); cudaMemcpyToSymbol("dFix", &hFix, 10 * sizeof(struct fix)); cudaMemcpyToSymbol("dIn", &hIn, 10 * sizeof(struct inlet)); cudaMemcpyToSymbol("dOut", &hOut, 10 * sizeof(struct outlet)); return 0; } int copyDeviceToHost(struct model *dm, struct model *hm) { hm->pn = dm->pn; cudaMemcpy(hm->Material, dm->Material, (MAXP * sizeof(int)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->Mass, dm->Mass, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->Smooth, dm->Smooth, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->PosX, dm->PosX, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->PosY, dm->PosY, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->PosZ, dm->PosZ, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->VelX, dm->VelX, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->VelY, dm->VelY, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->VelZ, dm->VelZ, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->Density, dm->Density, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->Energy, dm->Energy, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->Pressure, dm->Pressure, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->Sound, dm->Sound, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->VelDotX, dm->VelDotX, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->VelDotY, dm->VelDotY, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->VelDotZ, dm->VelDotZ, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->DensityDot, dm->DensityDot, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->EnergyDot, dm->EnergyDot, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->PosX0, dm->PosX0, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->PosY0, dm->PosY0, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->PosZ0, dm->PosZ0, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->VelX0, dm->VelX0, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->VelY0, dm->VelY0, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->VelZ0, dm->VelZ0, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->Density0, dm->Density0, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->Energy0, dm->Energy0, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->List, dm->List, (MAXP * MAXN * sizeof(int)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->Hash, dm->Hash, (MAXP * sizeof(int)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->Index, dm->Index, (MAXP * sizeof(int)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->IntDummy, dm->IntDummy, (MAXP * sizeof(int)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->FloatDummy, dm->FloatDummy, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->SetStart, dm->SetStart, (MAXG * sizeof(int)), cudaMemcpyDeviceToHost); cudaMemcpy(hm->SetStop, dm->SetStop, (MAXG * sizeof(int)), cudaMemcpyDeviceToHost); hGrid.oX = dGrid.oX; hGrid.oY = dGrid.oY; hGrid.oZ = dGrid.oZ; hGrid.nX = dGrid.nX; hGrid.nY = dGrid.nY; hGrid.nZ = dGrid.nZ; hGrid.size = dGrid.size; return 0; } int backupDataHost(struct model *hm) { memcpy(hm->PosX0, hm->PosX, MAXP * sizeof(float)); memcpy(hm->PosY0, hm->PosY, MAXP * sizeof(float)); memcpy(hm->PosZ0, hm->PosZ, MAXP * sizeof(float)); 
memcpy(hm->VelX0, hm->VelX, MAXP * sizeof(float)); memcpy(hm->VelY0, hm->VelY, MAXP * sizeof(float)); memcpy(hm->VelZ0, hm->VelZ, MAXP * sizeof(float)); memcpy(hm->Density0, hm->Density, MAXP * sizeof(float)); memcpy(hm->Energy0, hm->Energy, MAXP * sizeof(float)); return 0; } int backupDataDevice(struct model *dm) { cudaMemcpy(dm->PosX0, dm->PosX, (MAXP * sizeof(float)), cudaMemcpyDeviceToDevice); cudaMemcpy(dm->PosY0, dm->PosY, (MAXP * sizeof(float)), cudaMemcpyDeviceToDevice); cudaMemcpy(dm->PosZ0, dm->PosZ, (MAXP * sizeof(float)), cudaMemcpyDeviceToDevice); cudaMemcpy(dm->VelX0, dm->VelX, (MAXP * sizeof(float)), cudaMemcpyDeviceToDevice); cudaMemcpy(dm->VelY0, dm->VelY, (MAXP * sizeof(float)), cudaMemcpyDeviceToDevice); cudaMemcpy(dm->VelZ0, dm->VelZ, (MAXP * sizeof(float)), cudaMemcpyDeviceToDevice); cudaMemcpy(dm->Density0, dm->Density, (MAXP * sizeof(float)), cudaMemcpyDeviceToDevice); cudaMemcpy(dm->Energy0, dm->Energy, (MAXP * sizeof(float)), cudaMemcpyDeviceToDevice); return 0; } /* int initRun() { FILE *stream; char tok[10]; int i, m, p, pn; int iv; float fv; int mpn, mpp[10]; // Open stream file stream = fopen("armando.run", "r"); while (!feof(stream)) { sprintf(tok, " "); fscanf(stream, "%s", tok); if (strcmp(tok, "MAT") == 0) { fscanf(stream, "%i", &iv); if ((iv > 0) && (iv <= 50)) m = iv; for (p = 0; p < 10; p++) hMatProp[m][p] = 0.0; if ((m > 0) && (m <= 10)) pn = 3; if ((m > 10) && (m <= 20)) pn = 9; if ((m > 20) && (m <= 30)) pn = 10; if ((m > 30) && (m <= 40)) pn = 5; if ((m > 40) && (m <= 50)) pn = 3; for (p = 0; p < pn; p++) { fscanf(stream, "%f", &fv); hMatProp[m][p] = fv; } printf("Material %d\n", m); printf("hMatProp: \n"); for (p = 0; p < pn; p++) printf(" %f\n", hMatProp[m][p]); printf("\n"); } if (strcmp(tok, "TIME") == 0) { fscanf(stream, "%f", &fv); if (fv > 0.0) hRun.dt = fv; fscanf(stream, "%i", &iv); if (iv > 0) hRun.tsn = iv; fscanf(stream, "%i", &iv); if (iv > 0) hRun.ssi = iv; printf("Time step: %f\n", hRun.dt); printf("Steps: %i\n", hRun.tsn); printf("Save step: %i\n", hRun.ssi); printf("\n"); } if (strcmp(tok, "LIMITS") == 0) { fscanf(stream, "%f", &fv); hRun.minX = fv; fscanf(stream, "%f", &fv); hRun.maxX = fv; fscanf(stream, "%f", &fv); hRun.minY = fv; fscanf(stream, "%f", &fv); hRun.maxY = fv; printf("Domain limits: \n"); printf("X: %+e - %+e \n", hRun.minX, hRun.maxX); printf("Y: %+e - %+e \n", hRun.minY, hRun.maxY); printf("\n"); } if (strcmp(tok, "MONITORS") == 0) { fscanf(stream, "%i", &iv); mpn = iv; for (i = 0; i < mpn; i++) { fscanf(stream, "%i", &iv); mpp[i] = iv; } printf("Monitored particles: %i \n", mpn); if (mpn > 0) { printf("Index:"); for (i = 0; i < mpn; i++) printf(" %i", mpp[i]); printf("\n"); printf("\n"); } } } fclose(stream); hSound = hSmooth / hRun.dt; return 0; } int scanData() { FILE *stream; int i; float fv1, fv2, fv3; int iv; // Stream file position stream = fopen("in_pos.txt", "r"); for (i = 0; !feof(stream); i++) { fscanf(stream, "%e %e ", &fv1, &fv2); hPosX[i] = fv1; hPosY[i] = fv2; } fclose(stream); hPN = i; // Stream file velocity stream = fopen("in_vel.txt", "r"); for (i = 0; i < hPN; i++) { fscanf(stream, "%e %e", &fv1, &fv2); hVelX[i] = fv1; hVelY[i] = fv2; } fclose(stream); // Stream file info stream = fopen("in_info.txt", "r"); for (i = 0; i < hPN; i++) { fscanf(stream, "%i %e %e ", &iv, &fv1, &fv2); hMaterial[i] = iv; hMass = fv1; hSmooth = fv2; } fclose(stream); // Stream file field stream = fopen("in_field.txt", "r"); for (i = 0; i < hPN; i++) { fscanf(stream, "%e %e %e ", &fv1, &fv2, &fv3); hDensity[i] = fv1; 
hPressure[i] = fv2; hEnergy[i] = fv3; } fclose(stream); return 0; } */ int printData(struct model *hm) { /** * \brief Particle data file output * * Saves particle data on a disk file * * \date Oct 21, 2010 * \author Luca Massidda */ FILE *stream; int i; // Stream file position stream = fopen("new_pos.txt", "w"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%+14.8e %+14.8e %+14.8e \n", hm->PosX[i], hm->PosY[i], hm->PosZ[i]); fclose(stream); // Stream file velocity stream = fopen("new_vel.txt", "w"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%+14.8e %+14.8e %+14.8e \n", hm->VelX[i], hm->VelY[i], hm->VelZ[i]); fclose(stream); // Stream file info stream = fopen("new_info.txt", "w"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%i %+14.8e %+14.8e \n", hm->Material[i], hm->Mass[i], hm->Smooth[i]); fclose(stream); // Stream file field stream = fopen("new_field.txt", "w"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%+14.8e %+14.8e %+14.8e \n", hm->Density[i], hm->Pressure[i], hm->Energy[i]); fclose(stream); /* // Stream file add1 stream = fopen("new_debug.txt", "w"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%d %d %d %f %f %f\n", i, hm->Index[i], hm->Hash[i], hm->PosX[i], hm->PosY[i], hm->PosZ[i]); fclose(stream); */ /* // Stream file add1 stream = fopen("new_debug.txt", "w"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%d %f %f %f %f %f %f\n", i, hm->VelX[i], hm->VelY[i], hm->VelZ[i], hm->Density[i], hm->Energy[i], hm->Pressure[i]); fclose(stream); */ // Stream file add1 stream = fopen("new_debug.txt", "w"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%d %d %d %d %d\n", i, hm->Index[i], hm->Hash[i], hm->SetStart[hm->Hash[i]], hm->SetStop[hm->Hash[i]]); fclose(stream); /* for (i = 0; i < hm->pn; i++) { printf("%d - ", i); for (int j = 0; j < MAXN; j++) printf("%d ", hm->List[i * MAXN +j]); printf("\n"); } */ return 0; } int outputVTK(struct model *hm, int ss) { /** * \brief Output Data file * * Saves vtk data file * * \date Oct 21, 2010 * \author Luca Massidda */ FILE *stream; char filename[80]; int i; // Stream position file sprintf(filename, "out%05d.vtk", ss); stream = fopen(filename, "w"); fprintf(stream, "# vtk DataFile Version 2.0\n"); fprintf(stream, "Unstructured Grid Example\n"); fprintf(stream, "ASCII\n"); fprintf(stream, "DATASET UNSTRUCTURED_GRID\n"); fprintf(stream, "POINTS %i float\n", hm->pn); for (i = 0; i < hm->pn; i++) fprintf(stream, "%+e %+e %+e \n", hm->PosX[i], hm->PosY[i], hm->PosZ[i]); fprintf(stream, "CELLS %i %i \n", hm->pn, 2*hm->pn); for (i = 0; i < hm->pn; i++) fprintf(stream, "%i %i \n", 1, i); fprintf(stream, "CELL_TYPES %i \n", hm->pn); for (i = 0; i < hm->pn; i++) fprintf(stream, "%i \n", 1); fprintf(stream, "POINT_DATA %i \n", hm->pn); fprintf(stream, "SCALARS material int 1 \n", hm->pn); fprintf(stream, "LOOKUP_TABLE default\n"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%+d \n", hm->Material[i]); fprintf(stream, "SCALARS density float 1 \n", hm->pn); fprintf(stream, "LOOKUP_TABLE default\n"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%+e \n", hm->Density[i]); fprintf(stream, "SCALARS pressure float 1 \n", hm->pn); fprintf(stream, "LOOKUP_TABLE default\n"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%+e \n", hm->Pressure[i]); fprintf(stream, "SCALARS energy float 1 \n", hm->pn); fprintf(stream, "LOOKUP_TABLE default\n"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%+e \n", hm->Energy[i]); fprintf(stream, "VECTORS velocity float\n"); for (i = 0; i < hm->pn; i++) fprintf(stream, "%+e %+e %+e \n", hm->VelX[i], hm->VelY[i], hm->VelZ[i]); 
fclose(stream); /* for (i = 0; i < hm->pn; i++) printf("%d %d \n", i, hm->Hash[i]); printf("\n\n\n"); for (i = 0; i < hm->SetStart.size(); i++) printf("%d %d %d \n", i, hm->SetStart[i], hm->SetStop[i]); for (i = 0; i < hm->pn; i++) { printf("%d - ", i); for (j = 0; j < MAXN; j++) printf("%d ", hm->List[i*MAXN +j]); printf("\n"); } */ return 0; } void initFree(struct model *hm) { int i, j, k, m, b, pi; double rho, c0, pmin; double dr; m = 1; b = 2; rho = 1000.; c0 = 50.; pmin = -1.e12; hMatType[m] = 4; hMatProp[m][0] = rho; hMatProp[m][1] = c0; hMatProp[m][2] = pmin; hMatType[b] = 0; hMatProp[b][0] = rho; hMatProp[b][1] = c0; hMatProp[b][2] = pmin; dr = 0.1; // x4 pi = 0; for (k = 0; k < 10; k++) { for (j = 0; j < 10; j++) { for (i = 0; i < 10; i++) { hm->PosX[pi] = i * dr + 0.0 * dr; hm->PosY[pi] = j * dr + 0.0 * dr; hm->PosZ[pi] = k * dr + 0.0 * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = m; hm->Density[pi] = rho; //+ (9.81 * rho / c0 / c0 * (50 - j) * dr); hm->Energy[pi] = 0.0; hm->Pressure[pi] = 1.0; pi++; } } } for (k = 0; k < 10; k++) { for (j = -2; j < -1; j++) { for (i = 0; i < 10; i++) { hm->PosX[pi] = i * dr + 0.0 * dr; hm->PosY[pi] = j * dr + 0.0 * dr; hm->PosZ[pi] = k * dr + 0.0 * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = b; hm->Density[pi] = rho; //+ (9.81 * rho / c0 / c0 * (50 - j) * dr); hm->Energy[pi] = 0.0; hm->Pressure[pi] = 1.0; pi++; } } } hm->pn = pi; for (i = 0; i < hm->pn; i++) { hm->Mass[i] = rho * dr * dr * dr; hm->Smooth[i] = 1.2 * dr; hm->Sound[i] = c0; } hRun.minX = -2.5; hRun.maxX = 2.5; hRun.minY = -2.5; hRun.maxY = 2.5; hRun.minZ = -2.5; hRun.maxZ = 2.5; hRun.dt = 2.0e-3; //1.0e-3; hRun.tsn = 600; //1000; hRun.ssi = 200; hGrid.oX = hRun.minX; hGrid.oY = hRun.minY; hGrid.oZ = hRun.minZ; hGrid.size = 2.0f * 1.2f * dr; hGrid.nX = (int) ((hRun.maxX - hRun.minX) / hGrid.size) +1; hGrid.nY = (int) ((hRun.maxY - hRun.minY) / hGrid.size) +1; hGrid.nZ = (int) ((hRun.maxZ - hRun.minZ) / hGrid.size) +1; hLoad[0].minX = hRun.minX; hLoad[0].maxX = hRun.maxX; hLoad[0].minY = hRun.minY; hLoad[0].maxY = hRun.maxY; hLoad[0].minZ = hRun.minZ; hLoad[0].maxZ = hRun.maxZ; hLoad[0].gy = -9.81f; printf("Freefall\n"); printf("Particles: %i \n", hm->pn); } void initBox(struct model *hm) { int i, j, k, m, b, q, pi; double rho, c0, pmin; double dr; q = 9; m = 1; b = 2; rho = 1000.; c0 = 40.; pmin = -1.e12; hMatType[m] = 4; hMatProp[m][0] = rho; hMatProp[m][1] = c0; hMatProp[m][2] = pmin; hMatType[b] = 0; hMatProp[b][0] = rho; hMatProp[b][1] = c0; hMatProp[b][2] = pmin; dr = 0.1 / q; pi = 0; for (k = 0; k < 10 * q; k++) { for (j = 0; j < 10 * q; j++) { for (i = 0; i < 15 * q; i++) { hm->PosX[pi] = i * dr + 0.0 * dr; hm->PosY[pi] = j * dr + 0.0 * dr; hm->PosZ[pi] = k * dr + 0.0 * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = m; hm->Density[pi] = rho; //+ (9.81 * rho / c0 / c0 * (50 - j) * dr); hm->Energy[pi] = 0.0; hm->Pressure[pi] = 1.0; pi++; } } } for (k = -2; k < 20 * q +2; k++) { for (j = -2; j < -1; j++) { for (i = -2; i < 20 * q +2; i++) { hm->PosX[pi] = i * dr + 0.0 * dr; hm->PosY[pi] = j * dr + 0.0 * dr; hm->PosZ[pi] = k * dr + 0.0 * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = b; hm->Density[pi] = rho; //+ (9.81 * rho / c0 / c0 * (50 - j) * dr); hm->Energy[pi] = 0.0; hm->Pressure[pi] = 1.0; pi++; } } } for (k = -2; k < -1; k++) { for (j = -1; j < 15 * q; j++) { for (i = -2; i < 20 * q +2; i++) { hm->PosX[pi] = i * dr + 0.0 * 
dr; hm->PosY[pi] = j * dr + 0.0 * dr; hm->PosZ[pi] = k * dr + 0.0 * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = b; hm->Density[pi] = rho; //+ (9.81 * rho / c0 / c0 * (50 - j) * dr); hm->Energy[pi] = 0.0; hm->Pressure[pi] = 1.0; pi++; } } } for (k = 20 * q +1; k < 20 * q +2 ; k++) { for (j = -1; j < 15 * q; j++) { for (i = -2; i < 20 * q +2; i++) { hm->PosX[pi] = i * dr + 0.0 * dr; hm->PosY[pi] = j * dr + 0.0 * dr; hm->PosZ[pi] = k * dr + 0.0 * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = b; hm->Density[pi] = rho; //+ (9.81 * rho / c0 / c0 * (50 - j) * dr); hm->Energy[pi] = 0.0; hm->Pressure[pi] = 1.0; pi++; } } } for (k = -1; k < 20 * q +1; k++) { for (j = -1; j < 15 * q; j++) { for (i = -2; i < -1; i++) { hm->PosX[pi] = i * dr + 0.0 * dr; hm->PosY[pi] = j * dr + 0.0 * dr; hm->PosZ[pi] = k * dr + 0.0 * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = b; hm->Density[pi] = rho; //+ (9.81 * rho / c0 / c0 * (50 - j) * dr); hm->Energy[pi] = 0.0; hm->Pressure[pi] = 1.0; pi++; } } } for (k = -1; k < 20 * q +1; k++) { for (j = -1; j < 15 * q; j++) { for (i = 20 * q +1; i < 20 * q +2; i++) { hm->PosX[pi] = i * dr + 0.0 * dr; hm->PosY[pi] = j * dr + 0.0 * dr; hm->PosZ[pi] = k * dr + 0.0 * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = b; hm->Density[pi] = rho; //+ (9.81 * rho / c0 / c0 * (50 - j) * dr); hm->Energy[pi] = 0.0; hm->Pressure[pi] = 1.0; pi++; } } } hm->pn = pi; for (i = 0; i < hm->pn; i++) { hm->Mass[i] = rho * dr * dr * dr; hm->Smooth[i] = 1.2 * dr; hm->Sound[i] = c0; } hRun.minX = -2.5; hRun.maxX = 2.5; hRun.minY = -2.5; hRun.maxY = 2.5; hRun.minZ = -2.5; hRun.maxZ = 2.5; hRun.dt = dr / c0; hRun.tsn = 800 * q; hRun.ssi = 20 * q; hGrid.oX = hRun.minX; hGrid.oY = hRun.minY; hGrid.oZ = hRun.minZ; hGrid.size = 2.0f * 1.2f * dr; hGrid.nX = (int) ((hRun.maxX - hRun.minX) / hGrid.size) +1; hGrid.nY = (int) ((hRun.maxY - hRun.minY) / hGrid.size) +1; hGrid.nZ = (int) ((hRun.maxZ - hRun.minZ) / hGrid.size) +1; hLoad[0].minX = hRun.minX; hLoad[0].maxX = hRun.maxX; hLoad[0].minY = hRun.minY; hLoad[0].maxY = hRun.maxY; hLoad[0].minZ = hRun.minZ; hLoad[0].maxZ = hRun.maxZ; hLoad[0].gy = -9.81f; printf("Box\n"); printf("Particles: %i \n", hm->pn); } void initBath(struct model *hm) { int i, j, k, m, b, q, pi; double rho, c0, pmin; double dr; q = 15; m = 1; b = 2; rho = 1000.; c0 = 40.; pmin = -1.e12; hMatType[m] = 4; hMatProp[m][0] = rho; hMatProp[m][1] = c0; hMatProp[m][2] = pmin; hMatType[b] = 0; hMatProp[b][0] = rho; hMatProp[b][1] = c0; hMatProp[b][2] = pmin; dr = 0.1 / q; pi = 0; for (k = 0; k < 10 * q; k++) { for (j = 0; j < 10 * q; j++) { for (i = 0; i < 10 * q; i++) { hm->PosX[pi] = i * dr + 0.0 * dr; hm->PosY[pi] = j * dr + 0.0 * dr; hm->PosZ[pi] = k * dr + 0.0 * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = m; hm->Density[pi] = rho; //+ (9.81 * rho / c0 / c0 * (50 - j) * dr); hm->Energy[pi] = 0.0; hm->Pressure[pi] = 1.0; pi++; } } } for (k = -2; k < 10 * q +2; k++) { for (j = -2; j < -1; j++) { for (i = -2; i < 10 * q +2; i++) { hm->PosX[pi] = i * dr + 0.0 * dr; hm->PosY[pi] = j * dr + 0.0 * dr; hm->PosZ[pi] = k * dr + 0.0 * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = b; hm->Density[pi] = rho; //+ (9.81 * rho / c0 / c0 * (50 - j) * dr); hm->Energy[pi] = 0.0; hm->Pressure[pi] = 1.0; pi++; } } } for (k = -2; k < -1; k++) { for (j = -1; j < 12 * q; j++) { for (i = -2; i < 10 
* q +2; i++) { hm->PosX[pi] = i * dr + 0.0 * dr; hm->PosY[pi] = j * dr + 0.0 * dr; hm->PosZ[pi] = k * dr + 0.0 * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = b; hm->Density[pi] = rho; //+ (9.81 * rho / c0 / c0 * (50 - j) * dr); hm->Energy[pi] = 0.0; hm->Pressure[pi] = 1.0; pi++; } } } for (k = 10 * q +1; k < 10 * q +2 ; k++) { for (j = -1; j < 12 * q; j++) { for (i = -2; i < 10 * q +2; i++) { hm->PosX[pi] = i * dr + 0.0 * dr; hm->PosY[pi] = j * dr + 0.0 * dr; hm->PosZ[pi] = k * dr + 0.0 * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = b; hm->Density[pi] = rho; //+ (9.81 * rho / c0 / c0 * (50 - j) * dr); hm->Energy[pi] = 0.0; hm->Pressure[pi] = 1.0; pi++; } } } for (k = -1; k < 10 * q +1; k++) { for (j = -1; j < 12 * q; j++) { for (i = -2; i < -1; i++) { hm->PosX[pi] = i * dr + 0.0 * dr; hm->PosY[pi] = j * dr + 0.0 * dr; hm->PosZ[pi] = k * dr + 0.0 * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = b; hm->Density[pi] = rho; //+ (9.81 * rho / c0 / c0 * (50 - j) * dr); hm->Energy[pi] = 0.0; hm->Pressure[pi] = 1.0; pi++; } } } for (k = -1; k < 10 * q +1; k++) { for (j = -1; j < 12 * q; j++) { for (i = 10 * q +1; i < 10 * q +2; i++) { hm->PosX[pi] = i * dr + 0.0 * dr; hm->PosY[pi] = j * dr + 0.0 * dr; hm->PosZ[pi] = k * dr + 0.0 * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = b; hm->Density[pi] = rho; //+ (9.81 * rho / c0 / c0 * (50 - j) * dr); hm->Energy[pi] = 0.0; hm->Pressure[pi] = 1.0; pi++; } } } hm->pn = pi; for (i = 0; i < hm->pn; i++) { hm->Mass[i] = rho * dr * dr * dr; hm->Smooth[i] = 1.2 * dr; hm->Sound[i] = c0; } hRun.minX = -2.5; hRun.maxX = 2.5; hRun.minY = -2.5; hRun.maxY = 2.5; hRun.minZ = -2.5; hRun.maxZ = 2.5; hRun.dt = dr / c0; hRun.tsn = 1000 * q; hRun.ssi = 20 * q; hGrid.oX = hRun.minX; hGrid.oY = hRun.minY; hGrid.oZ = hRun.minZ; hGrid.size = 2.0f * 1.2f * dr; hGrid.nX = (int) ((hRun.maxX - hRun.minX) / hGrid.size) +1; hGrid.nY = (int) ((hRun.maxY - hRun.minY) / hGrid.size) +1; hGrid.nZ = (int) ((hRun.maxZ - hRun.minZ) / hGrid.size) +1; hLoad[0].minX = hRun.minX; hLoad[0].maxX = hRun.maxX; hLoad[0].minY = hRun.minY; hLoad[0].maxY = hRun.maxY; hLoad[0].minZ = hRun.minZ; hLoad[0].maxZ = hRun.maxZ; hLoad[0].gy = -9.81f; hOut[0].oX = 5 * q * dr; hOut[0].oY = dr; hOut[0].oZ = 5 * q * dr; hOut[0].nX = 0.0f; hOut[0].nY = 1.0f; hOut[0].nZ = 0.0f; hOut[0].R = 2.0f*q*dr; hIn[0].Material = m; hIn[0].Mass = rho * dr * dr * dr; hIn[0].Smooth = 1.2f * dr; hIn[0].Density = rho; hIn[0].Energy = 0.0f; hIn[0].oX = 0.f * q * dr; hIn[0].oY = 15.f * q * dr; hIn[0].oZ = 5.f * q * dr; hIn[0].nX = 1.0f; hIn[0].nY = 0.0f; hIn[0].nZ = 0.0f; hIn[0].Velocity = 1.5f; hIn[0].R = 2.0f *q*dr; printf("Bath\n"); printf("Particles: %i \n", hm->pn); printf("Grid %i\n", hGrid.nX * hGrid.nY * hGrid.nZ); } void initChannel(struct model *hm) { int i, j, k, m, b, q, pi; double rho, c0, pmin; double dr; q = 1; m = 1; b = 2; rho = 1000.; c0 = 20.; pmin = -1.e12; hMatType[m] = 4; hMatProp[m][0] = rho; hMatProp[m][1] = c0; hMatProp[m][2] = pmin; hMatType[b] = 0; hMatProp[b][0] = rho; hMatProp[b][1] = c0; hMatProp[b][2] = pmin; dr = 0.1 / q; pi = 0; /* for (k = 0; k < 10 * q; k++) { for (j = 0; j < 10 * q; j++) { for (i = 0; i < 10 * q; i++) { hm->PosX[pi] = i * dr + 0.0 * dr; hm->PosY[pi] = j * dr + 0.0 * dr; hm->PosZ[pi] = k * dr + 0.0 * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = m; hm->Density[pi] = rho; //+ (9.81 * rho / c0 / c0 * (50 
- j) * dr); hm->Energy[pi] = 0.0; hm->Pressure[pi] = 1.0; pi++; } } } */ for (k = -10 * q -2; k <= 10 * q +2; k++) { for (j = -2; j <= -2; j++) { for (i = 0; i <= 100 * q; i++) { hm->PosX[pi] = i * dr + 0.0 * dr; hm->PosY[pi] = j * dr + 0.0 * dr; hm->PosZ[pi] = k * dr + 0.0 * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = b; hm->Density[pi] = rho; hm->Energy[pi] = 0.0; hm->Pressure[pi] = 0.0; pi++; } } } for (k = -10 * q -2; k <= -10 * q -2; k++) { for (j = -1; j <= 15 * q; j++) { for (i = 0; i < 100 * q; i++) { hm->PosX[pi] = i * dr + 0.0 * dr; hm->PosY[pi] = j * dr + 0.0 * dr; hm->PosZ[pi] = k * dr + 0.0 * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = b; hm->Density[pi] = rho; hm->Energy[pi] = 0.0; hm->Pressure[pi] = 0.0; pi++; } } } for (k = 10 * q +2; k <= 10 * q +2 ; k++) { for (j = -1; j <= 15 * q; j++) { for (i = 0; i <= 100 * q; i++) { hm->PosX[pi] = i * dr + 0.0 * dr; hm->PosY[pi] = j * dr + 0.0 * dr; hm->PosZ[pi] = k * dr + 0.0 * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = b; hm->Density[pi] = rho; hm->Energy[pi] = 0.0; hm->Pressure[pi] = 0.0; pi++; } } } hm->pn = pi; for (i = 0; i < hm->pn; i++) { hm->Mass[i] = rho * dr * dr * dr; hm->Smooth[i] = 1.2 * dr; hm->Sound[i] = c0; } hRun.minX = -0.5; hRun.maxX = 10.5; hRun.minY = -0.5; hRun.maxY = 3.0; hRun.minZ = -1.5; hRun.maxZ = 1.5; hRun.dt = dr / c0; hRun.tsn = 10000 * q; hRun.ssi = 200 * q; hGrid.oX = hRun.minX; hGrid.oY = hRun.minY; hGrid.oZ = hRun.minZ; hGrid.size = 2.0f * 1.2f * dr; hGrid.nX = (int) ((hRun.maxX - hRun.minX) / hGrid.size) +1; hGrid.nY = (int) ((hRun.maxY - hRun.minY) / hGrid.size) +1; hGrid.nZ = (int) ((hRun.maxZ - hRun.minZ) / hGrid.size) +1; hLoad[0].minX = hRun.minX; hLoad[0].maxX = hRun.maxX; hLoad[0].minY = hRun.minY; hLoad[0].maxY = hRun.maxY; hLoad[0].minZ = hRun.minZ; hLoad[0].maxZ = hRun.maxZ; hLoad[0].gy = -9.81f; hFix[0].minX = hRun.minX; hFix[0].maxX = 2.0f * q * dr; hFix[0].minY = hRun.minY; hFix[0].maxY = hRun.maxY; hFix[0].minZ = hRun.minZ; hFix[0].maxZ = hRun.maxZ; hFix[0].velX = 2.0f; hFix[0].velY = 0.0f; hFix[0].velZ = 0.0f; hFix[1].minX = 97 * q * dr; hFix[1].maxX = hRun.maxX; hFix[1].minY = hRun.minY; hFix[1].maxY = hRun.maxY; hFix[1].minZ = hRun.minZ; hFix[1].maxZ = hRun.maxZ; hFix[1].velX = 2.0f; hFix[1].velY = 0.0f; hFix[1].velZ = 0.0f; hOut[0].oX = 100 * q * dr; hOut[0].oY = 5 * q * dr; hOut[0].oZ = 0.0f; hOut[0].nX = -1.0f; hOut[0].nY = 0.0f; hOut[0].nZ = 0.0f; hOut[0].R = 20.0f*q*dr; hIn[0].Material = m; hIn[0].Mass = rho * dr * dr * dr; hIn[0].Smooth = 1.2f * dr; hIn[0].Density = rho; hIn[0].Energy = 0.0f; hIn[0].oX = 10.f * q * dr; hIn[0].oY = 20.f * q * dr; hIn[0].oZ = 0.f * q * dr; hIn[0].nX = 0.5f; hIn[0].nY = -0.5f; hIn[0].nZ = 0.0f; hIn[0].Velocity = 2.0f; hIn[0].R = 10.0f *q*dr; printf("Channel\n"); printf("Particles: %i \n", hm->pn); printf("Grid %i\n", hGrid.nX * hGrid.nY * hGrid.nZ); } void initDamBreak(struct model *hm) { int i, j, k, m, b, q, pi; double rho, c0, pmin; double dr; q = 4; m = 1; b = 2; rho = 1000.; c0 = 20.; pmin = -1.e12; hMatType[m] = 4; hMatProp[m][0] = rho; hMatProp[m][1] = c0; hMatProp[m][2] = pmin; hMatType[b] = 0; hMatProp[b][0] = rho; hMatProp[b][1] = c0; hMatProp[b][2] = pmin; dr = 0.025 / q; pi = 0; for (k = -20 * q; k <= 20 * q; k++) { for (j = 0; j <= 22 * q; j++) { for (i = 0; i <= 49 * q; i++) { hm->PosX[pi] = i * dr; hm->PosY[pi] = j * dr; hm->PosZ[pi] = k * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; 
hm->Material[pi] = m; hm->Density[pi] = rho; hm->Energy[pi] = 0.0; hm->Pressure[pi] = 0.0; pi++; } } } for (k = -20 * q -2; k <= 20 * q +2; k++) { for (j = -2; j <= -2; j++) { for (i = -80 * q -2; i <= 49 * q +2; i++) { hm->PosX[pi] = i * dr; hm->PosY[pi] = j * dr; hm->PosZ[pi] = k * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = b; hm->Density[pi] = rho; hm->Energy[pi] = 0.0; hm->Pressure[pi] = 0.0; pi++; } } } for (k = -20 * q -2; k <= -20 * q -2; k++) { for (j = -1; j <= 40 * q; j++) { for (i = -80 * q -2; i <= 49 * q +2; i++) { hm->PosX[pi] = i * dr; hm->PosY[pi] = j * dr; hm->PosZ[pi] = k * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = b; hm->Density[pi] = rho; hm->Energy[pi] = 0.0; hm->Pressure[pi] = 0.0; pi++; } } } for (k = 20 * q +2; k <= 20 * q +2; k++) { for (j = -1; j <= 40 * q; j++) { for (i = -80 * q -2; i <= 49 * q +2; i++) { hm->PosX[pi] = i * dr; hm->PosY[pi] = j * dr; hm->PosZ[pi] = k * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = b; hm->Density[pi] = rho; hm->Energy[pi] = 0.0; hm->Pressure[pi] = 0.0; pi++; } } } for (k = -20 * q -1; k <= 20 * q +1; k++) { for (j = -1; j <= 40 * q; j++) { for (i = -80 * q -2; i <= -80 * q -2; i++) { hm->PosX[pi] = i * dr; hm->PosY[pi] = j * dr; hm->PosZ[pi] = k * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = b; hm->Density[pi] = rho; hm->Energy[pi] = 0.0; hm->Pressure[pi] = 0.0; pi++; } } } for (k = -20 * q -1; k <= 20 * q +1; k++) { for (j = -1; j <= 40 * q; j++) { for (i = 49 * q +2; i <= 49 * q +2; i++) { hm->PosX[pi] = i * dr; hm->PosY[pi] = j * dr; hm->PosZ[pi] = k * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = b; hm->Density[pi] = rho; hm->Energy[pi] = 0.0; hm->Pressure[pi] = 0.0; pi++; } } } for (k = -8 * q -1; k <= 8 * q +1; k++) { for (j = -0; j <= 6 * q -1; j++) { for (i = -53 * q +1; i <= -47 * q -1; i++) { hm->PosX[pi] = i * dr; hm->PosY[pi] = j * dr; hm->PosZ[pi] = k * dr; hm->VelX[pi] = 0.0; hm->VelY[pi] = 0.0; hm->VelZ[pi] = 0.0; hm->Material[pi] = b; hm->Density[pi] = rho; hm->Energy[pi] = 0.0; hm->Pressure[pi] = 0.0; pi++; } } } hm->pn = pi; for (i = 0; i < hm->pn; i++) { hm->Mass[i] = rho * dr * dr * dr; hm->Smooth[i] = 1.2 * dr; hm->Sound[i] = c0; } hRun.minX = -2.5; hRun.maxX = 2.5; hRun.minY = -2.5; hRun.maxY = 2.5; hRun.minZ = -2.5; hRun.maxZ = 2.5; hRun.dt = dr / c0; hRun.tsn = 2000 * q; hRun.ssi = 40 * q; hGrid.oX = hRun.minX; hGrid.oY = hRun.minY; hGrid.oZ = hRun.minZ; hGrid.size = 2.0f * 1.2f * dr; hGrid.nX = (int) ((hRun.maxX - hRun.minX) / hGrid.size) +1; hGrid.nY = (int) ((hRun.maxY - hRun.minY) / hGrid.size) +1; hGrid.nZ = (int) ((hRun.maxZ - hRun.minZ) / hGrid.size) +1; hLoad[0].minX = hRun.minX; hLoad[0].maxX = hRun.maxX; hLoad[0].minY = hRun.minY; hLoad[0].maxY = hRun.maxY; hLoad[0].minZ = hRun.minZ; hLoad[0].maxZ = hRun.maxZ; hLoad[0].gy = -9.81f; printf("Dam break\n"); printf("Particles: %i \n", hm->pn); printf("Grid %i\n", hGrid.nX * hGrid.nY * hGrid.nZ); } int iSort(int *array, int *perm, int n) { int i; static int* dummy = NULL; if (!dummy) dummy = (int *) malloc(MAXP * sizeof(int)); for (i = 0; i < n; i++) dummy[i] = array[i]; for (i = 0; i < n; i++) array[i] = dummy[perm[i]]; return 0; } int fSort(float *array, int *perm, int n) { int i; static float* dummy = NULL; if (!dummy) dummy = (float *) malloc(MAXP * sizeof(float)); for (i = 0; i < n; i++) dummy[i] = array[i]; for (i = 0; i < n; i++) array[i] = 
dummy[perm[i]]; return 0; } int mapCompare(const void *a, const void *b) { int c; struct pair m1, m2; c = 0; m1 = *(struct pair*)a; m2 = *(struct pair*)b; if (m1.key < m2.key) c = -1; if (m1.key > m2.key) c = 1; return c; } void updateHashHost(const int pn, const float* PosX, const float* PosY, const float* PosZ, int* Hash, const struct grid Grid) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ int ip, ix, iy, iz, ic; for (ip = 0; ip < pn; ip++) { ix = (int) truncf((PosX[ip] - Grid.oX) / Grid.size); iy = (int) truncf((PosY[ip] - Grid.oY) / Grid.size); iz = (int) truncf((PosZ[ip] - Grid.oZ) / Grid.size); ic = ix + iy * Grid.nX + iz * Grid.nX * Grid.nY; if (ic < 0) ic = 0; if (ic >= Grid.nX * Grid.nY * Grid.nZ) ic = Grid.nX * Grid.nY * Grid.nZ -1; Hash[ip] = ic; } } __global__ void updateHashDevice(const int pn, const float* PosX, const float* PosY, const float* PosZ, int* Hash, const struct grid Grid) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ int ip, ix, iy, iz, ic; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip < pn) { ix = (int) truncf((PosX[ip] - Grid.oX) / Grid.size); iy = (int) truncf((PosY[ip] - Grid.oY) / Grid.size); iz = (int) truncf((PosZ[ip] - Grid.oZ) / Grid.size); ic = ix + iy * Grid.nX + iz * Grid.nX * Grid.nY; if (ic < 0) ic = 0; if (ic >= Grid.nX * Grid.nY * Grid.nZ) ic = Grid.nX * Grid.nY * Grid.nZ -1; Hash[ip] = ic; } } void checkOutHost(const int pn, const float* PosX, const float* PosY, const float* PosZ, int* Hash, const struct grid Grid) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ float dn, dr; int ip, i; for (ip = 0; ip < pn; ip++) { for (i = 0; i < 10; i++) { dn = 0.0f; dn += (PosX[ip] - hOut[i].oX) * hOut[i].nX; dn += (PosY[ip] - hOut[i].oY) * hOut[i].nY; dn += (PosZ[ip] - hOut[i].oZ) * hOut[i].nZ; dr = 0.0f; dr += powf((PosX[ip] - hOut[i].oX) - dn * hOut[i].nX, 2); dr += powf((PosY[ip] - hOut[i].oY) - dn * hOut[i].nY, 2); dr += powf((PosZ[ip] - hOut[i].oZ) - dn * hOut[i].nZ, 2); dr = sqrtf(dr); if ((dn < 0.0f) && (dr < hOut[i].R)) { Hash[ip] = Grid.nX * Grid.nY * Grid.nZ; } } if ((PosX[ip] > hRun.maxX) || (PosX[ip] < hRun.minX) || (PosY[ip] > hRun.maxY) || (PosY[ip] < hRun.minY) || (PosZ[ip] > hRun.maxZ) || (PosZ[ip] < hRun.minZ)) Hash[ip] = Grid.nX * Grid.nY * Grid.nZ; } } __global__ void checkOutDevice(const int pn, const float* PosX, const float* PosY, const float* PosZ, int* Hash, const struct grid Grid) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ float dn, dr; int ip, i; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip < pn) { for (i = 0; i < 10; i++) { dn = 0.0f; dn += (PosX[ip] - dOut[i].oX) * dOut[i].nX; dn += (PosY[ip] - dOut[i].oY) * dOut[i].nY; dn += (PosZ[ip] - dOut[i].oZ) * dOut[i].nZ; dr = 0.0f; dr += powf((PosX[ip] - dOut[i].oX) - dn * dOut[i].nX, 2); dr += powf((PosY[ip] - dOut[i].oY) - dn * dOut[i].nY, 2); dr += powf((PosZ[ip] - dOut[i].oZ) - dn * dOut[i].nZ, 2); dr = sqrtf(dr); if ((dn < 0.0f) && (dr < dOut[i].R)) { Hash[ip] = Grid.nX * Grid.nY * Grid.nZ; } } if ((PosX[ip] > dRun.maxX) || (PosX[ip] < dRun.minX) || (PosY[ip] > dRun.maxY) || (PosY[ip] < dRun.minY) || (PosZ[ip] > dRun.maxZ) || (PosZ[ip] < dRun.minZ)) Hash[ip] = Grid.nX * Grid.nY * Grid.nZ; } } void inletHost(struct model *hm) { int i, iu, iv, n, p; float3 u, v, w, r; int material[MAXPI]; float mass[MAXPI], smooth[MAXPI]; float posX[MAXPI], posY[MAXPI], posZ[MAXPI]; float velX[MAXPI], velY[MAXPI], velZ[MAXPI]; float density[MAXPI], 
energy[MAXPI]; p = 0; for (i = 0; i < 10; i++) if (hIn[i].Material != 0) { hIn[i].Distance += hIn[i].Velocity * hRun.dt; if (hIn[i].Distance > hIn[i].Smooth / 1.2f) { hIn[i].Distance = hIn[i].Distance - hIn[i].Smooth / 1.2f; w = make_float3(hIn[i].nX, hIn[i].nY, hIn[i].nZ); w = normalize(w); if ((w.x <= w.y) && (w.x <= w.z)) u = make_float3(1.f, 0.f, 0.f); if ((w.y <= w.x) && (w.y <= w.z)) u = make_float3(0.f, 1.f, 0.f); if ((w.z <= w.x) && (w.z <= w.y)) u = make_float3(0.f, 0.f, 1.f); v = cross(w, u); n = roundf(1.2f * hIn[i].R / hIn[i].Smooth); for (iv = -n; iv <= n; iv++) { for (iu = -n; iu <= n; iu++) { r = iu * u + iv * v; r *= hIn[i].Smooth / 1.2f; if (length(r) < hIn[i].R) { material[p] = hIn[i].Material; mass[p] = hIn[i].Mass; smooth[p] = hIn[i].Smooth; posX[p] = hIn[i].oX + r.x; posY[p] = hIn[i].oY + r.y; posZ[p] = hIn[i].oZ + r.z; velX[p] = hIn[i].Velocity * w.x; velY[p] = hIn[i].Velocity * w.y; velZ[p] = hIn[i].Velocity * w.z; density[p] = hIn[i].Density; energy[p] = hIn[i].Energy; p++; } } } } } cudaMemcpy(hm->Material + hm->pn, material, (p * sizeof(int)), cudaMemcpyHostToHost); cudaMemcpy(hm->Mass + hm->pn, mass, (p * sizeof(float)), cudaMemcpyHostToHost); cudaMemcpy(hm->Smooth + hm->pn, smooth, (p * sizeof(float)), cudaMemcpyHostToHost); cudaMemcpy(hm->PosX + hm->pn, posX, (p * sizeof(float)), cudaMemcpyHostToHost); cudaMemcpy(hm->PosY + hm->pn, posY, (p * sizeof(float)), cudaMemcpyHostToHost); cudaMemcpy(hm->PosZ + hm->pn, posZ, (p * sizeof(float)), cudaMemcpyHostToHost); cudaMemcpy(hm->VelX + hm->pn, velX, (p * sizeof(float)), cudaMemcpyHostToHost); cudaMemcpy(hm->VelY + hm->pn, velY, (p * sizeof(float)), cudaMemcpyHostToHost); cudaMemcpy(hm->VelZ + hm->pn, velZ, (p * sizeof(float)), cudaMemcpyHostToHost); cudaMemcpy(hm->Density + hm->pn, density, (p * sizeof(float)), cudaMemcpyHostToHost); cudaMemcpy(hm->Energy + hm->pn, energy, (p * sizeof(float)), cudaMemcpyHostToHost); hm->pn += p; } void inletDevice(struct model *dm) { int i, iu, iv, n, p; float3 u, v, w, r; int material[MAXPI]; float mass[MAXPI], smooth[MAXPI]; float posX[MAXPI], posY[MAXPI], posZ[MAXPI]; float velX[MAXPI], velY[MAXPI], velZ[MAXPI]; float density[MAXPI], energy[MAXPI]; p = 0; for (i = 0; i < 10; i++) if (hIn[i].Material != 0) { hIn[i].Distance += hIn[i].Velocity * hRun.dt; if (hIn[i].Distance > hIn[i].Smooth / 1.2f) { hIn[i].Distance = hIn[i].Distance - hIn[i].Smooth / 1.2f; w = make_float3(hIn[i].nX, hIn[i].nY, hIn[i].nZ); w = normalize(w); if ((fabsf(w.x) <= fabsf(w.y)) && (fabsf(w.x) <= fabsf(w.z))) u = make_float3(1.f, 0.f, 0.f); if ((fabsf(w.y) <= fabsf(w.x)) && (fabsf(w.y) <= fabsf(w.z))) u = make_float3(0.f, 1.f, 0.f); if ((fabsf(w.z) <= fabsf(w.x)) && (fabsf(w.z) <= fabsf(w.y))) u = make_float3(0.f, 0.f, 1.f); v = cross(w, u); n = roundf(1.2f * hIn[i].R / hIn[i].Smooth); for (iv = -n; iv <= n; iv++) { for (iu = -n; iu <= n; iu++) { r = iu * u + iv * v; r *= hIn[i].Smooth / 1.2f; if (length(r) < hIn[i].R) { material[p] = hIn[i].Material; mass[p] = hIn[i].Mass; smooth[p] = hIn[i].Smooth; posX[p] = hIn[i].oX + r.x; posY[p] = hIn[i].oY + r.y; posZ[p] = hIn[i].oZ + r.z; velX[p] = hIn[i].Velocity * w.x; velY[p] = hIn[i].Velocity * w.y; velZ[p] = hIn[i].Velocity * w.z; density[p] = hIn[i].Density; energy[p] = hIn[i].Energy; p++; } } } } } cudaMemcpy(dm->Material + dm->pn, material, (p * sizeof(int)), cudaMemcpyHostToDevice); cudaMemcpy(dm->Mass + dm->pn, mass, (p * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->Smooth + dm->pn, smooth, (p * sizeof(float)), cudaMemcpyHostToDevice); 
cudaMemcpy(dm->PosX + dm->pn, posX, (p * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->PosY + dm->pn, posY, (p * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->PosZ + dm->pn, posZ, (p * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->VelX + dm->pn, velX, (p * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->VelY + dm->pn, velY, (p * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->VelZ + dm->pn, velZ, (p * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->Density + dm->pn, density, (p * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(dm->Energy + dm->pn, energy, (p * sizeof(float)), cudaMemcpyHostToDevice); dm->pn += p; } void updateSetsHost(const int pn, int *SetStart, int *SetStop, const int* Hash) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ int ip; int hash, nextHash, prevHash; for (ip = 0; ip < pn; ip++) { hash = Hash[ip]; if (ip == 0) prevHash = -1; else prevHash = Hash[ip -1]; if (ip == pn -1) nextHash = -1; else nextHash = Hash[ip +1]; if (hash != prevHash) SetStart[hash] = ip; if (hash != nextHash) SetStop[hash] = ip +1; } } __global__ void updateSetsDevice(const int pn, int *SetStart, int *SetStop, const int* Hash) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ __shared__ int prevHash[THREADS]; __shared__ int nextHash[THREADS]; int ip; int hash; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip >= pn) return; hash = Hash[ip]; if (threadIdx.x < THREADS -1) prevHash[threadIdx.x +1] = hash; if (threadIdx.x > 0) nextHash[threadIdx.x -1] = hash; if (threadIdx.x == 0) { if (ip == 0) prevHash[threadIdx.x] = -1; else prevHash[threadIdx.x] = Hash[ip -1]; } if (threadIdx.x == THREADS -1) { if (ip == pn -1) nextHash[threadIdx.x] = -1; else nextHash[threadIdx.x] = Hash[ip +1]; } __syncthreads(); if (hash != prevHash[threadIdx.x]) SetStart[hash] = ip; if (hash != nextHash[threadIdx.x]) SetStop[hash] = ip +1; } void updateListHost(const int pn, int *List, const int* SetStart, const int* SetStop, const float* Smooth, const float* PosX, const float* PosY, const float* PosZ, const struct grid Grid) { int ip, ic, ix, iy, iz, i, j, k, jp, jc, np; float dx, dy, dz, dr; // Particles list is filled for (ip = 0; ip < pn; ip++) { ix = (int) ((PosX[ip] - Grid.oX) / Grid.size); iy = (int) ((PosY[ip] - Grid.oY) / Grid.size); iz = (int) ((PosZ[ip] - Grid.oZ) / Grid.size); ic = ix + iy * Grid.nX + iz * Grid.nX * Grid.nY; np = 0; for (k = -1; k <= 1; k++) { for (j = -1; j <= 1; j++) { for (i = -1; i <= 1; i++) { jc = ic + i + j * Grid.nX + k * Grid.nX * Grid.nY; if (jc >= 0 && jc <= Grid.nX * Grid.nY * Grid.nZ) { for (jp = SetStart[jc]; jp < SetStop[jc]; jp++) { dx = PosX[ip] - PosX[jp]; dy = PosY[ip] - PosY[jp]; dz = PosZ[ip] - PosZ[jp]; dr = sqrtf(dx * dx + dy * dy + dz * dz); if ((dr < 2.0f * Smooth[ip]) && (np < MAXN)) { List[ip * MAXN + np] = jp; np++; } } } } } } while (np < MAXN) { List[ip * MAXN + np] = ip; np++; } } } __global__ void updateListDevice(const int pn, int *List, const int* SetStart, const int* SetStop, const float* Smooth, const float* PosX, const float* PosY, const float* PosZ, const struct grid Grid) { int ip, ic, ix, iy, iz, i, j, k, jp, jc, np; float dx, dy, dz, dr; // Particles list is filled ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip >= pn) return; ix = (int) ((PosX[ip] - Grid.oX) / Grid.size); iy = (int) ((PosY[ip] - Grid.oY) / Grid.size); iz = (int) ((PosZ[ip] - Grid.oZ) / Grid.size); ic = ix + iy * Grid.nX + iz * Grid.nX * Grid.nY; np = 0; for (k = 
-1; k <= 1; k++) { for (j = -1; j <= 1; j++) { for (i = -1; i <= 1; i++) { jc = ic + i + j * Grid.nX + k * Grid.nX * Grid.nY; if (jc >= 0 && jc <= Grid.nX * Grid.nY * Grid.nZ) { for (jp = SetStart[jc]; jp < SetStop[jc]; jp++) { dx = PosX[ip] - PosX[jp]; dy = PosY[ip] - PosY[jp]; dz = PosZ[ip] - PosZ[jp]; dr = sqrtf(dx * dx + dy * dy + dz * dz); if ((dr < 2.0f * Smooth[ip]) && (np < MAXN)) { List[ip * MAXN + np] = jp; np++; } } } } } } while (np < MAXN) { List[ip * MAXN + np] = ip; np++; } } int neighbourListHost(struct model *hm) { struct pair map[MAXP]; int i, ip, pout; updateHashHost(hm->pn, hm->PosX, hm->PosY, hm->PosZ, hm->Hash, hGrid); checkOutHost(hm->pn, hm->PosX, hm->PosY, hm->PosZ, hm->Hash, hGrid); for (ip = 0; ip < hm->pn; ip++) hm->Index[ip] = ip; for (ip = 0; ip < hm->pn; ip++) { map[ip].key = hm->Hash[ip]; map[ip].value = hm->Index[ip]; } qsort(map, hm->pn, sizeof(struct pair), mapCompare); for (ip = 0; ip < hm->pn; ip++) { hm->Hash[ip] = map[ip].key; hm->Index[ip] = map[ip].value; } iSort(hm->Material, hm->Index, hm->pn); fSort(hm->Mass, hm->Index, hm->pn); fSort(hm->Smooth, hm->Index, hm->pn); fSort(hm->PosX, hm->Index, hm->pn); fSort(hm->PosY, hm->Index, hm->pn); fSort(hm->PosZ, hm->Index, hm->pn); fSort(hm->VelX, hm->Index, hm->pn); fSort(hm->VelY, hm->Index, hm->pn); fSort(hm->VelZ, hm->Index, hm->pn); fSort(hm->Density, hm->Index, hm->pn); fSort(hm->Energy, hm->Index, hm->pn); fSort(hm->Pressure, hm->Index, hm->pn); fSort(hm->Sound, hm->Index, hm->pn); pout = 0; for (ip = 0; ip < hm->pn; ip++) if (hm->Hash[ip] == hGrid.nX * hGrid.nY * hGrid.nZ) pout++; hm->pn -= pout; for (i = 0; i < hGrid.nX * hGrid.nY * hGrid.nZ; i++) hm->SetStart[i] = 0; for (i = 0; i < hGrid.nX * hGrid.nY * hGrid.nZ; i++) hm->SetStop[i] = 0; updateSetsHost(hm->pn, hm->SetStart, hm->SetStop, hm->Hash); updateListHost(hm->pn, hm->List, hm->SetStart, hm->SetStop, hm->Smooth, hm->PosX, hm->PosY, hm->PosZ, hGrid); return 0; } int neighbourListDevice(struct model *dm) { int pout; int blocks, threads; blocks = (dm->pn + THREADS - 1) / THREADS; threads = THREADS; thrust::device_ptr<int> tIndex(dm->Index); thrust::device_ptr<int> tHash(dm->Hash); thrust::device_ptr<int> tMaterial(dm->Material); thrust::device_ptr<float> tMass(dm->Mass); thrust::device_ptr<float> tSmooth(dm->Smooth); thrust::device_ptr<float> tPosX(dm->PosX); thrust::device_ptr<float> tPosY(dm->PosY); thrust::device_ptr<float> tPosZ(dm->PosZ); thrust::device_ptr<float> tVelX(dm->VelX); thrust::device_ptr<float> tVelY(dm->VelY); thrust::device_ptr<float> tVelZ(dm->VelZ); thrust::device_ptr<float> tDensity(dm->Density); thrust::device_ptr<float> tEnergy(dm->Energy); thrust::device_ptr<float> tPressure(dm->Pressure); thrust::device_ptr<float> tSound(dm->Sound); thrust::device_ptr<int> tIntDummy(dm->IntDummy); thrust::device_ptr<float> tFloatDummy(dm->FloatDummy); thrust::device_ptr<int> tSetStart(dm->SetStart); thrust::device_ptr<int> tSetStop(dm->SetStop); updateHashDevice <<< blocks, threads >>> (dm->pn, dm->PosX, dm->PosY, dm->PosZ, dm->Hash, dGrid); checkOutDevice <<< blocks, threads >>> (dm->pn, dm->PosX, dm->PosY, dm->PosZ, dm->Hash, dGrid); thrust::sequence(tIndex, tIndex + dm->pn, 0); thrust::sort_by_key(tHash, tHash + dm->pn, tIndex); thrust::copy(tMaterial, tMaterial + dm->pn, tIntDummy); thrust::gather(tIndex, tIndex + dm->pn, tIntDummy, tMaterial); thrust::copy(tMass, tMass + dm->pn, tFloatDummy); thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tMass); thrust::copy(tSmooth, tSmooth + dm->pn, tFloatDummy); thrust::gather(tIndex, 
tIndex + dm->pn, tFloatDummy, tSmooth); thrust::copy(tPosX, tPosX + dm->pn, tFloatDummy); thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tPosX); thrust::copy(tPosY, tPosY + dm->pn, tFloatDummy); thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tPosY); thrust::copy(tPosZ, tPosZ + dm->pn, tFloatDummy); thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tPosZ); thrust::copy(tVelX, tVelX + dm->pn, tFloatDummy); thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tVelX); thrust::copy(tVelY, tVelY + dm->pn, tFloatDummy); thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tVelY); thrust::copy(tVelZ, tVelZ + dm->pn, tFloatDummy); thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tVelZ); thrust::copy(tDensity, tDensity + dm->pn, tFloatDummy); thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tDensity); thrust::copy(tEnergy, tEnergy + dm->pn, tFloatDummy); thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tEnergy); thrust::copy(tPressure, tPressure + dm->pn, tFloatDummy); thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tPressure); thrust::copy(tSound, tSound + dm->pn, tFloatDummy); thrust::gather(tIndex, tIndex + dm->pn, tFloatDummy, tSound); pout = thrust::count(tHash, tHash + dm->pn, dGrid.nX * dGrid.nY * dGrid.nZ); dm->pn -= pout; thrust::fill(tSetStart, tSetStart + dGrid.nX * dGrid.nY * dGrid.nZ, 0); thrust::fill(tSetStop, tSetStop + dGrid.nX * dGrid.nY * dGrid.nZ, 0); updateSetsDevice <<< blocks, threads >>> (dm->pn, dm->SetStart, dm->SetStop, dm->Hash); updateListDevice <<< blocks, threads >>> (dm->pn, dm->List, dm->SetStart, dm->SetStop, dm->Smooth, dm->PosX, dm->PosY, dm->PosZ, dGrid); return 0; } int RKstepHost(struct model *hm, float alpha) { int ip; for (ip = 0; ip < hm->pn; ip++) { hm->VelDotX[ip] = 0.0f; hm->VelDotY[ip] = 0.0f; hm->VelDotZ[ip] = 0.0f; hm->DensityDot[ip] = 0.0f; hm->EnergyDot[ip] = 0.0f; } // External loads updateLoadsHost(hm->pn, hm->Material, hm->PosX, hm->PosY, hm->PosZ, hm->VelX, hm->VelY, hm->VelZ, hm->VelDotX, hm->VelDotY, hm->VelDotZ, hm->EnergyDot); // External forces //updateForcesHost(); // Calculate particle interactions balanceMassMomentumHost(hm->pn, hm->List, hm->Material, hm->Mass, hm->Smooth, hm->PosX, hm->PosY, hm->PosZ, hm->VelX, hm->VelY, hm->VelZ, hm->Density, hm->Pressure, hm->Sound, hm->DensityDot, hm->VelDotX, hm->VelDotY, hm->VelDotZ); //balanceMassMomentumHostOld(); balanceEnergyHost(hm->pn, hm->Pressure, hm->Density, hm->DensityDot, hm->EnergyDot); //balanceEnergyHostOld(); // Update particles updateParticlesHost(hm->pn, alpha, hm->Material, hm->VelDotX, hm->VelDotY, hm->VelDotZ, hm->DensityDot, hm->EnergyDot, hm->PosX0, hm->PosY0, hm->PosZ0, hm->VelX0, hm->VelY0, hm->VelZ0, hm->Density0, hm->Energy0, hm->PosX, hm->PosY, hm->PosZ, hm->VelX, hm->VelY, hm->VelZ, hm->Density, hm->Energy, hm->Pressure, hm->Sound); //updateParticlesHostOld(alpha); return 0; } int RKstepDevice(struct model *dm, float alpha) { int blocks, threads; blocks = (dm->pn + THREADS - 1) / THREADS; threads = THREADS; thrust::device_ptr<float> tVelDotX(dm->VelDotX); thrust::device_ptr<float> tVelDotY(dm->VelDotY); thrust::device_ptr<float> tVelDotZ(dm->VelDotZ); thrust::device_ptr<float> tDensityDot(dm->DensityDot); thrust::device_ptr<float> tEnergyDot(dm->EnergyDot); thrust::fill(tVelDotX, tVelDotX + dm->pn, 0.0f); thrust::fill(tVelDotY, tVelDotY + dm->pn, 0.0f); thrust::fill(tVelDotZ, tVelDotZ + dm->pn, 0.0f); thrust::fill(tDensityDot, tDensityDot + dm->pn, 0.0f); thrust::fill(tEnergyDot, tEnergyDot + dm->pn, 0.0f); // External loads 
updateLoadsDevice <<< blocks, threads >>> (dm->pn, dm->Material, dm->PosX, dm->PosY, dm->PosZ, dm->VelDotX, dm->VelDotY, dm->VelDotZ, dm->EnergyDot); // Calculate particle interactions balanceMassMomentumDevice <<< blocks, threads >>> (dm->pn, dm->List, dm->Material, dm->Mass, dm->Smooth, dm->PosX, dm->PosY, dm->PosZ, dm->VelX, dm->VelY, dm->VelZ, dm->Density, dm->Pressure, dm->Sound, dm->DensityDot, dm->VelDotX, dm->VelDotY, dm->VelDotZ); balanceEnergyDevice <<< blocks, threads >>> (dm->pn, dm->Pressure, dm->Density, dm->DensityDot, dm->EnergyDot); // Update particles updateParticlesDevice <<< blocks, threads >>> (dm->pn, alpha, dm->Material, dm->VelDotX, dm->VelDotY, dm->VelDotZ, dm->DensityDot, dm->EnergyDot, dm->PosX0, dm->PosY0, dm->PosZ0, dm->VelX0, dm->VelY0, dm->VelZ0, dm->Density0, dm->Energy0, dm->PosX, dm->PosY, dm->PosZ, dm->VelX, dm->VelY, dm->VelZ, dm->Density, dm->Energy, dm->Pressure, dm->Sound); return 0; } int RKintegrateHost(struct model *hm) { /** * \brief Runge Kutta 3rd order time integration * * Integrate the Navier Stokes equations in time with the * Total Variation Diminishing Runge-Kutta algorithm of the 3rd order * * \date Dec 20, 2010 * \author Luca Massidda */ int ts; // TIME CYCLE for (ts = 0; ts <= hRun.tsn; ts++) { // Output data if ((ts % hRun.ssi) == 0) { printf("Saving time: %g \n", ts * hRun.dt); printf("Particles: %i \n", hm->pn); printData(hm); outputVTK(hm, ts / hRun.ssi); } // Inlet conditions inletHost(hm); // Calculate neighbour list neighbourListHost(hm); // Save initial condition backupDataHost(hm); // Step 1 RKstepHost(hm, 1.0); // Step 2 RKstepHost(hm, 1.0 / 4.0); // Step 3 RKstepHost(hm, 2.0 / 3.0); } return 0; } int RKintegrateDevice(struct model *hm, struct model *dm) { /** * \brief Runge Kutta 3rd order time integration * * Integrate the Navier Stokes equations in time with the * Total Variation Diminishing Runge-Kutta algorithm of the 3rd order * * \date Dec 20, 2010 * \author Luca Massidda */ int ts; // TIME CYCLE for (ts = 0; ts <= hRun.tsn; ts++) { // Output data if ((ts % hRun.ssi) == 0) { copyDeviceToHost(dm, hm); printf("Saving time: %g \n", ts * hRun.dt); printf("Particles: %i \n", hm->pn); printData(hm); outputVTK(hm, ts / hRun.ssi); } // Inlet conditions inletDevice(dm); // Calculate neighbour list neighbourListDevice(dm); // Save initial condition backupDataDevice(dm); // Step 1 RKstepDevice(dm, 1.0); // Step 2 RKstepDevice(dm, 1.0 / 4.0); // Step 3 RKstepDevice(dm, 2.0 / 3.0); } return 0; } int main() { /** * \brief armando2D v2.0 * * An SPH code for non stationary fluid dynamics. * This is the reviewed and improved C version of Armando v1.0 * developed at CERN in 2008 * * \date Oct 20, 2010 * \author Luca Massidda */ struct model hModel, dModel; int i; initHost(&hModel); for (i = 0; i < 10; i++) { hLoad[i].gx = 0.0f; hLoad[i].gy = 0.0f; hLoad[i].gz = 0.0f; hLoad[i].w = 0.0f; hOut[i].nX = 0.0f; hOut[i].nY = 0.0f; hOut[i].nZ = 0.0f; } //initBox(&hModel); //initBath(&hModel); //initDamBreak(&hModel); initChannel(&hModel); initDevice(&dModel); copyHostToDevice(&hModel, &dModel); RKintegrateDevice(&hModel, &dModel); //RKintegrateHost(&hModel); return 0; }
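/*
 * Illustrative sketch, not part of the original sources: the RKintegrate*
 * drivers above call RKstep* three times with alpha = 1, 1/4 and 2/3, which
 * are the stage weights of the TVD/SSP Runge-Kutta 3 scheme named in their
 * doc comments. The actual per-particle update lives in
 * updateParticlesHost/Device (not shown here); the helper below only shows
 * the assumed per-stage blend of the backed-up state with the advanced one.
 */
static inline float tvdRK3Stage(float alpha, float dt,
                                float x0,   /* value saved by backupData* at step start */
                                float x,    /* value after the previous stage */
                                float xDot) /* time derivative evaluated at that stage */
{
    return (1.0f - alpha) * x0 + alpha * (x + dt * xDot);
}
/* Stage sequence mirroring RKintegrate*: alpha = 1.0f, 0.25f, 2.0f / 3.0f. */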
8ef90bc4c82fc82bc67b4d7c60ebef28c97e96e1.hip
// !!! This is a file automatically generated by hipify!!! // ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2020 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. // ---------------------------------------------------------------------------- // #include <vector> #include "ATen/hip/HIPContext.h" #include "open3d/ml/impl/continuous_conv/ContinuousConvTransposeBackpropFilter.cuh" #include "open3d/ml/pytorch/TorchHelper.h" #include "torch/script.h" using namespace open3d::ml::impl; template <class TReal, class TIndex> void ContinuousConvTransposeBackpropFilterCUDA( const torch::Tensor& filters, const torch::Tensor& out_positions, const torch::Tensor& out_importance, const torch::Tensor& extents, const torch::Tensor& offset, const torch::Tensor& inp_positions, const torch::Tensor& inp_features, const torch::Tensor& inp_neighbors_importance_sum, const torch::Tensor& inp_neighbors_row_splits, const torch::Tensor& neighbors_index, const torch::Tensor& neighbors_importance, const torch::Tensor& neighbors_row_splits, const torch::Tensor& out_features_gradient, const bool align_corners, const CoordinateMapping coordinate_mapping, const bool normalize, const InterpolationMode interpolation, const int64_t max_temp_mem_MB, torch::Tensor& filter_backprop) { const bool individual_extents = extents.size(0) > 1; const bool isotropic_extents = extents.size(1) == 1; std::vector<int> filter_dims; for (auto d : filters.sizes()) { filter_dims.push_back(d); } auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); auto cuda_device_props = at::cuda::getCurrentDeviceProperties(); const int texture_alignment = cuda_device_props->textureAlignment; auto device = filters.device(); void* temp_ptr = nullptr; size_t temp_size = 0; size_t max_temp_size = 0; // determine temp_size CConvTransposeBackpropFilterCUDA<TReal, TIndex>( stream, temp_ptr, temp_size, max_temp_size, texture_alignment, filter_backprop.data_ptr<TReal>(), filter_dims, out_positions.size(0), out_positions.data_ptr<TReal>(), out_importance.size(0) ? out_importance.data_ptr<TReal>() : nullptr, inp_positions.size(0), inp_positions.data_ptr<TReal>(), inp_features.data_ptr<TReal>(), inp_neighbors_importance_sum.size(0) ? 
inp_neighbors_importance_sum.data_ptr<TReal>() : nullptr, inp_neighbors_row_splits.data_ptr<int64_t>(), neighbors_index.size(0), neighbors_index.data_ptr<TIndex>(), neighbors_importance.size(0) ? neighbors_importance.data_ptr<TReal>() : nullptr, neighbors_row_splits.data_ptr<int64_t>(), extents.data_ptr<TReal>(), offset.data_ptr<TReal>(), out_features_gradient.data_ptr<TReal>(), interpolation, coordinate_mapping, align_corners, individual_extents, isotropic_extents, normalize); temp_size = ::max( ::min(size_t(max_temp_mem_MB) * 1024 * 1024, max_temp_size), temp_size); auto temp_tensor = CreateTempTensor(temp_size, device, &temp_ptr); // actually run the operation CConvTransposeBackpropFilterCUDA<TReal, TIndex>( stream, temp_ptr, temp_size, max_temp_size, texture_alignment, filter_backprop.data_ptr<TReal>(), filter_dims, out_positions.size(0), out_positions.data_ptr<TReal>(), out_importance.size(0) ? out_importance.data_ptr<TReal>() : nullptr, inp_positions.size(0), inp_positions.data_ptr<TReal>(), inp_features.data_ptr<TReal>(), inp_neighbors_importance_sum.size(0) ? inp_neighbors_importance_sum.data_ptr<TReal>() : nullptr, inp_neighbors_row_splits.data_ptr<int64_t>(), neighbors_index.size(0), neighbors_index.data_ptr<TIndex>(), neighbors_importance.size(0) ? neighbors_importance.data_ptr<TReal>() : nullptr, neighbors_row_splits.data_ptr<int64_t>(), extents.data_ptr<TReal>(), offset.data_ptr<TReal>(), out_features_gradient.data_ptr<TReal>(), interpolation, coordinate_mapping, align_corners, individual_extents, isotropic_extents, normalize); } #define INSTANTIATE(TReal, TIndex) \ template void ContinuousConvTransposeBackpropFilterCUDA<TReal, TIndex>( \ const torch::Tensor& filters, const torch::Tensor& out_positions, \ const torch::Tensor& out_importance, const torch::Tensor& extents, \ const torch::Tensor& offset, const torch::Tensor& inp_positions, \ const torch::Tensor& inp_features, \ const torch::Tensor& inp_neighbors_importance_sum, \ const torch::Tensor& inp_neighbors_row_splits, \ const torch::Tensor& neighbors_index, \ const torch::Tensor& neighbors_importance, \ const torch::Tensor& neighbors_row_splits, \ const torch::Tensor& out_features_gradient, \ const bool align_corners, \ const CoordinateMapping coordinate_mapping, const bool normalize, \ const InterpolationMode interpolation, \ const int64_t max_temp_mem_MB, torch::Tensor& filter_backprop); INSTANTIATE(float, int32_t)
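/*
 * Illustrative sketch, not part of the original sources: both the .hip file
 * above and the .cu file below drive CConvTransposeBackpropFilterCUDA with a
 * two-pass "query the workspace, then execute" pattern. The stand-alone
 * sketch reproduces only that structure; fakeOp, its sizes and the
 * malloc-based allocation are hypothetical placeholders (the original uses
 * CreateTempTensor and the real backprop kernel).
 */
#include <algorithm>
#include <cstddef>
#include <cstdlib>

// When temp_ptr is null the op only reports its memory needs, otherwise it runs.
static void fakeOp(void* temp_ptr, size_t& temp_size, size_t& max_temp_size) {
    if (temp_ptr == nullptr) {
        temp_size = 1u << 20;      // minimum workspace the op can live with
        max_temp_size = 8u << 20;  // workspace that enables the fastest path
        return;
    }
    /* ... the real computation would happen here ... */
}

static void runWithWorkspace(size_t max_temp_mem_MB) {
    void*  temp_ptr      = nullptr;
    size_t temp_size     = 0;
    size_t max_temp_size = 0;
    fakeOp(temp_ptr, temp_size, max_temp_size);                   // pass 1: sizes only
    temp_size = std::max(
            std::min(max_temp_mem_MB * 1024 * 1024, max_temp_size),
            temp_size);                                           // clamp to the user budget
    temp_ptr = std::malloc(temp_size);
    fakeOp(temp_ptr, temp_size, max_temp_size);                   // pass 2: actually run
    std::free(temp_ptr);
}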
8ef90bc4c82fc82bc67b4d7c60ebef28c97e96e1.cu
// ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2020 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. // ---------------------------------------------------------------------------- // #include <vector> #include "ATen/cuda/CUDAContext.h" #include "open3d/ml/impl/continuous_conv/ContinuousConvTransposeBackpropFilter.cuh" #include "open3d/ml/pytorch/TorchHelper.h" #include "torch/script.h" using namespace open3d::ml::impl; template <class TReal, class TIndex> void ContinuousConvTransposeBackpropFilterCUDA( const torch::Tensor& filters, const torch::Tensor& out_positions, const torch::Tensor& out_importance, const torch::Tensor& extents, const torch::Tensor& offset, const torch::Tensor& inp_positions, const torch::Tensor& inp_features, const torch::Tensor& inp_neighbors_importance_sum, const torch::Tensor& inp_neighbors_row_splits, const torch::Tensor& neighbors_index, const torch::Tensor& neighbors_importance, const torch::Tensor& neighbors_row_splits, const torch::Tensor& out_features_gradient, const bool align_corners, const CoordinateMapping coordinate_mapping, const bool normalize, const InterpolationMode interpolation, const int64_t max_temp_mem_MB, torch::Tensor& filter_backprop) { const bool individual_extents = extents.size(0) > 1; const bool isotropic_extents = extents.size(1) == 1; std::vector<int> filter_dims; for (auto d : filters.sizes()) { filter_dims.push_back(d); } auto stream = at::cuda::getCurrentCUDAStream(); auto cuda_device_props = at::cuda::getCurrentDeviceProperties(); const int texture_alignment = cuda_device_props->textureAlignment; auto device = filters.device(); void* temp_ptr = nullptr; size_t temp_size = 0; size_t max_temp_size = 0; // determine temp_size CConvTransposeBackpropFilterCUDA<TReal, TIndex>( stream, temp_ptr, temp_size, max_temp_size, texture_alignment, filter_backprop.data_ptr<TReal>(), filter_dims, out_positions.size(0), out_positions.data_ptr<TReal>(), out_importance.size(0) ? out_importance.data_ptr<TReal>() : nullptr, inp_positions.size(0), inp_positions.data_ptr<TReal>(), inp_features.data_ptr<TReal>(), inp_neighbors_importance_sum.size(0) ? inp_neighbors_importance_sum.data_ptr<TReal>() : nullptr, inp_neighbors_row_splits.data_ptr<int64_t>(), neighbors_index.size(0), neighbors_index.data_ptr<TIndex>(), neighbors_importance.size(0) ? 
neighbors_importance.data_ptr<TReal>() : nullptr, neighbors_row_splits.data_ptr<int64_t>(), extents.data_ptr<TReal>(), offset.data_ptr<TReal>(), out_features_gradient.data_ptr<TReal>(), interpolation, coordinate_mapping, align_corners, individual_extents, isotropic_extents, normalize); temp_size = std::max( std::min(size_t(max_temp_mem_MB) * 1024 * 1024, max_temp_size), temp_size); auto temp_tensor = CreateTempTensor(temp_size, device, &temp_ptr); // actually run the operation CConvTransposeBackpropFilterCUDA<TReal, TIndex>( stream, temp_ptr, temp_size, max_temp_size, texture_alignment, filter_backprop.data_ptr<TReal>(), filter_dims, out_positions.size(0), out_positions.data_ptr<TReal>(), out_importance.size(0) ? out_importance.data_ptr<TReal>() : nullptr, inp_positions.size(0), inp_positions.data_ptr<TReal>(), inp_features.data_ptr<TReal>(), inp_neighbors_importance_sum.size(0) ? inp_neighbors_importance_sum.data_ptr<TReal>() : nullptr, inp_neighbors_row_splits.data_ptr<int64_t>(), neighbors_index.size(0), neighbors_index.data_ptr<TIndex>(), neighbors_importance.size(0) ? neighbors_importance.data_ptr<TReal>() : nullptr, neighbors_row_splits.data_ptr<int64_t>(), extents.data_ptr<TReal>(), offset.data_ptr<TReal>(), out_features_gradient.data_ptr<TReal>(), interpolation, coordinate_mapping, align_corners, individual_extents, isotropic_extents, normalize); } #define INSTANTIATE(TReal, TIndex) \ template void ContinuousConvTransposeBackpropFilterCUDA<TReal, TIndex>( \ const torch::Tensor& filters, const torch::Tensor& out_positions, \ const torch::Tensor& out_importance, const torch::Tensor& extents, \ const torch::Tensor& offset, const torch::Tensor& inp_positions, \ const torch::Tensor& inp_features, \ const torch::Tensor& inp_neighbors_importance_sum, \ const torch::Tensor& inp_neighbors_row_splits, \ const torch::Tensor& neighbors_index, \ const torch::Tensor& neighbors_importance, \ const torch::Tensor& neighbors_row_splits, \ const torch::Tensor& out_features_gradient, \ const bool align_corners, \ const CoordinateMapping coordinate_mapping, const bool normalize, \ const InterpolationMode interpolation, \ const int64_t max_temp_mem_MB, torch::Tensor& filter_backprop); INSTANTIATE(float, int32_t)
a306612ffd08240f1c73b4ebbdd2f803e00c39c0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "THHUNN.h" #include "common.h" #define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit /* * Description: * this function subsamples an input 3D tensor along dimensions 1 and 2 * 3D input, 3D output, 1D weight, 1D bias */ __global__ void subsample(float *input, float *output, float *weight, float *bias, int input_n, int input_h, int input_w, int kH, int kW, int dH, int dW) { // iterators int xx, yy; // output size int output_w = (input_w - kW) / dW + 1; int output_h = (input_h - kH) / dH + 1; // compute offsets based on thread/block ID int o = blockIdx.x; int i = o; int k = blockIdx.x % input_n; int xx_start = threadIdx.x; int xx_end = output_w; int xx_step = blockDim.x; int yy_start = blockDim.y*blockIdx.y + threadIdx.y; int yy_end = output_h; int yy_step = blockDim.y*gridDim.y; // select input/output plane output = output + o*output_w*output_h; input = input + i*input_w*input_h; // Get the good mask for (k,i) (k out, i in) float the_weight = weight[k]; // Initialize to the bias float the_bias = bias[k]; // For all output pixels... for(yy = yy_start; yy < yy_end; yy+=yy_step) { for(xx = xx_start; xx < xx_end; xx+=xx_step) { // Compute the mean of the input image... float *ptr_input = input + yy*dH*input_w + xx*dW; float *ptr_output = output + yy*output_w + xx; float sum = 0; int kx, ky; for(ky = 0; ky < kH; ky++) { for(kx = 0; kx < kW; kx++) sum += ptr_input[kx]; ptr_input += input_w; // next input line } // Update output *ptr_output = the_weight*sum + the_bias; } } } /* * Description: * this function computes the gradWeight from input and gradOutput */ __global__ void subgradweight(float *input, float *gradOutput, float *gradWeight, float *gradBias, int input_n, int input_h, int input_w, int kH, int kW, int dH, int dW, float scale) { // iterators int xx, yy; // output size int output_w = (input_w - kW) / dW + 1; int output_h = (input_h - kH) / dH + 1; // compute offsets based on thread/block ID int o = blockIdx.x; int i = o; int k = blockIdx.x % input_n; int xx_start = threadIdx.x; int xx_end = output_w; int xx_step = blockDim.x; int yy_start = threadIdx.y; int yy_end = output_h; int yy_step = blockDim.y; // select input/output plane gradOutput = gradOutput + o*output_w*output_h; input = input + i*input_w*input_h; // thread ID int tid = blockDim.x*threadIdx.y + threadIdx.x; // create array to hold partial sums __shared__ float sums[CUDA_MAX_THREADS]; sums[tid] = 0; // compute partial sums for(yy = yy_start; yy < yy_end; yy+=yy_step) { for(xx = xx_start; xx < xx_end; xx+=xx_step) { float *ptr_input = input + yy*dH*input_w + xx*dW; float *ptr_gradOutput = gradOutput + yy*output_w + xx; float z = *ptr_gradOutput; long kx, ky; for(ky = 0; ky < kH; ky++) { for(kx = 0; kx < kW; kx++) { sums[tid] += z * ptr_input[kx]; } ptr_input += input_w; } } } __syncthreads(); // reduce: accumulate all partial sums to produce final gradWeight if ((threadIdx.x == 0) && (threadIdx.y == 0)) { for(int i = 0; i < blockDim.x*blockDim.y; i++) gradWeight[k] += scale*sums[i]; } __syncthreads(); // compute gradBias sums[tid] = 0; for (int i=tid; i<output_w*output_h; i+=(blockDim.x*blockDim.y)) { sums[tid] += gradOutput[i]; } __syncthreads(); // reduce gradBias if ((threadIdx.x == 0) && (threadIdx.y == 0)) { for (int i=0; i<(blockDim.x*blockDim.y); i++) gradBias[k] += scale*sums[i]; } } /* * Description: * this function computes the gradInput from weight and gradOutput */ __global__ void 
subgradinput(float *gradInput, float *gradOutput, float *weight, int input_n, int input_h, int input_w, int kH, int kW, int dH, int dW) { // iterators int xx, yy; // output size int output_w = (input_w - kW) / dW + 1; int output_h = (input_h - kH) / dH + 1; // compute offsets based on thread/block ID int o = blockIdx.x; int i = o; int k = blockIdx.x % input_n; int xx_start = threadIdx.x; int xx_end = output_w; int xx_step = blockDim.x; int yy_start = blockDim.y*blockIdx.y + threadIdx.y; int yy_end = output_h; int yy_step = blockDim.y*gridDim.y; // select input/output plane gradOutput = gradOutput + o*output_w*output_h; gradInput = gradInput + i*input_w*input_h; // get weight float the_weight = weight[k]; // compute gradInput for(yy = yy_start; yy < yy_end; yy+=yy_step) { for(xx = xx_start; xx < xx_end; xx+=xx_step) { float *ptr_gradInput = gradInput + yy*dH*input_w + xx*dW; float *ptr_gradOutput = gradOutput + yy*output_w + xx; float z = *ptr_gradOutput * the_weight; int kx, ky; for(ky = 0; ky < kH; ky++) { for(kx = 0; kx < kW; kx++) ptr_gradInput[kx] += z; ptr_gradInput += input_w; } } } } /* * Description: * this function computes the gradInput from weight and gradOutput */ __global__ void subgradinputAtomic(float *gradInput, float *gradOutput, float *weight, int input_n, int input_h, int input_w, int kH, int kW, int dH, int dW) { // iterators int xx, yy; // output size int output_w = (input_w - kW) / dW + 1; int output_h = (input_h - kH) / dH + 1; // compute offsets based on thread/block ID int o = blockIdx.x; int i = o; int k = blockIdx.x % input_n; int xx_start = threadIdx.x; int xx_end = output_w; int xx_step = blockDim.x; int yy_start = blockDim.y*blockIdx.y + threadIdx.y; int yy_end = output_h; int yy_step = blockDim.y*gridDim.y; // select input/output plane gradOutput = gradOutput + o*output_w*output_h; gradInput = gradInput + i*input_w*input_h; // get weight float the_weight = weight[k]; // compute gradInput for(yy = yy_start; yy < yy_end; yy+=yy_step) { for(xx = xx_start; xx < xx_end; xx+=xx_step) { float *ptr_gradInput = gradInput + yy*dH*input_w + xx*dW; float *ptr_gradOutput = gradOutput + yy*output_w + xx; float z = *ptr_gradOutput * the_weight; int kx, ky; for(ky = 0; ky < kH; ky++) { for(kx = 0; kx < kW; kx++) { atomicAdd(&(ptr_gradInput[kx]), z); } ptr_gradInput += input_w; } } } } void THNN_CudaSpatialSubSampling_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output, THCudaTensor *weight, THCudaTensor *bias, int kW, int kH, int dW, int dH) { float *weight_data = THCudaTensor_data(state, weight); float *bias_data = THCudaTensor_data(state, bias); float *output_data; float *input_data; int nInputPlane = THCudaTensor_size(state, weight, 0); THCUNN_assertSameGPU(state, 4, input, output, weight, bias); THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch) tensor expected"); if (input->nDimension == 3) { long nInputCols = input->size[2]; long nInputRows = input->size[1]; long nOutputCols = (nInputCols - kW) / dW + 1; long nOutputRows = (nInputRows - kH) / dH + 1; THArgCheck(input->size[0] == nInputPlane, 2, "invalid number of input planes"); THArgCheck(nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size"); input = THCudaTensor_newContiguous(state, input); input_data = THCudaTensor_data(state, input); THCudaTensor_resize3d(state, output, nInputPlane, nOutputRows, nOutputCols); output_data = THCudaTensor_data(state, output); // cuda blocks & threads: int yblocks = (int)(16L / nInputPlane); yblocks = yblocks 
< 1 ? 1 : yblocks; dim3 blocks(nInputPlane,yblocks); dim3 threads(32,8); // run subsample kernel hipLaunchKernelGGL(( subsample) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), input_data, output_data, weight_data, bias_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW); } else { long nInputCols = input->size[3]; long nInputRows = input->size[2]; long nbatch = input->size[0]; long nOutputCols = (nInputCols - kW) / dW + 1; long nOutputRows = (nInputRows - kH) / dH + 1; THArgCheck(input->size[1] == nInputPlane, 2, "invalid number of input planes"); THArgCheck(nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size"); input = THCudaTensor_newContiguous(state, input); input_data = THCudaTensor_data(state, input); THCudaTensor_resize4d(state, output, nbatch, nInputPlane, nOutputRows, nOutputCols); output_data = THCudaTensor_data(state, output); // cuda blocks & threads: int yblocks = (int)(16L / nInputPlane); yblocks = yblocks < 1 ? 1 : yblocks; dim3 blocks(nInputPlane*nbatch,yblocks); dim3 threads(32,8); // run subsample kernel hipLaunchKernelGGL(( subsample) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), input_data, output_data, weight_data, bias_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW); } // clean THCudaTensor_free(state, input); // check for errors hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in SpatialSubsampling.updateOutput: %s\n", hipGetErrorString(err)); THError("aborting"); } } void THNN_CudaSpatialSubSampling_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput, THCudaTensor *weight, int kW, int kH, int dW, int dH) { THCUNN_assertSameGPU(state, 4, input, gradOutput, weight, gradInput); int nInputPlane = THCudaTensor_size(state, weight, 0); if (input->nDimension == 3) { long nInputCols = input->size[2]; long nInputRows = input->size[1]; float *weight_data = THCudaTensor_data(state, weight); float *gradOutput_data = THCudaTensor_data(state, gradOutput); float *gradInput_data; THCudaTensor_resizeAs(state, gradInput, input); THCudaTensor_zero(state, gradInput); gradInput_data = THCudaTensor_data(state, gradInput); // cuda blocks & threads: int yblocks = (int)(16L / nInputPlane); yblocks = yblocks < 1 ? 1 : yblocks; dim3 blocks(nInputPlane,yblocks); dim3 threads(32,8); // run updateGradInput kernel if (kH <= dH && kW <= dW) { hipLaunchKernelGGL(( subgradinput) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), gradInput_data, gradOutput_data, weight_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW); } else { hipLaunchKernelGGL(( subgradinputAtomic) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), gradInput_data, gradOutput_data, weight_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW); } } else { long nInputCols = input->size[3]; long nInputRows = input->size[2]; long nbatch = input->size[0]; float *weight_data = THCudaTensor_data(state, weight); float *gradOutput_data = THCudaTensor_data(state, gradOutput); float *gradInput_data; THCudaTensor_resizeAs(state, gradInput, input); THCudaTensor_zero(state, gradInput); gradInput_data = THCudaTensor_data(state, gradInput); // cuda blocks & threads: int yblocks = (int)(16L / nInputPlane); yblocks = yblocks < 1 ? 
1 : yblocks; dim3 blocks(nInputPlane*nbatch,yblocks); dim3 threads(32,8); // run updateGradInput kernel if (kH <= dH && kW <= dW) { hipLaunchKernelGGL(( subgradinput) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), gradInput_data, gradOutput_data, weight_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW); } else { hipLaunchKernelGGL(( subgradinputAtomic) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), gradInput_data, gradOutput_data, weight_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW); } } // check for errors hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in SpatialSubsampling.updateGradInput: %s\n", hipGetErrorString(err)); THError("aborting"); } } void THNN_CudaSpatialSubSampling_accGradParameters(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradWeight, THCudaTensor *gradBias, int kW, int kH, int dW, int dH, float scale) { THCUNN_assertSameGPU(state, 4, input, gradOutput, gradWeight, gradBias); int nInputPlane = THCudaTensor_size(state, gradWeight, 0); if (input->nDimension == 3) { long nInputCols = input->size[2]; long nInputRows = input->size[1]; float *gradWeight_data = THCudaTensor_data(state, gradWeight); float *gradBias_data = THCudaTensor_data(state, gradBias); float *gradOutput_data = THCudaTensor_data(state, gradOutput); float *input_data; input = THCudaTensor_newContiguous(state, input); input_data = THCudaTensor_data(state, input); // cuda blocks & threads: dim3 blocks(nInputPlane); dim3 threads(32,8); // run gradweight kernel hipLaunchKernelGGL(( subgradweight) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), input_data, gradOutput_data, gradWeight_data, gradBias_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW, scale); } else { long nInputCols = input->size[3]; long nInputRows = input->size[2]; long nbatch = input->size[0]; float *gradWeight_data = THCudaTensor_data(state, gradWeight); float *gradBias_data = THCudaTensor_data(state, gradBias); float *gradOutput_data = THCudaTensor_data(state, gradOutput); float *input_data; input = THCudaTensor_newContiguous(state, input); input_data = THCudaTensor_data(state, input); // cuda blocks & threads: dim3 blocks(nInputPlane); dim3 threads(32,8); // run gradweight kernel long sl; for (sl=0; sl<nbatch; sl++) { hipLaunchKernelGGL(( subgradweight) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), input_data + sl*input->stride[0], gradOutput_data + sl*gradOutput->stride[0], gradWeight_data, gradBias_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW, scale); } } // clean THCudaTensor_free(state, input); // check for errors hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in SpatialSubsampling.accGradParameters: %s\n", hipGetErrorString(err)); THError("aborting"); } } #undef CUDA_MAX_THREADS
a306612ffd08240f1c73b4ebbdd2f803e00c39c0.cu
#include "THCUNN.h" #include "common.h" #define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit /* * Description: * this function subsamples an input 3D tensor along dimensions 1 and 2 * 3D input, 3D output, 1D weight, 1D bias */ __global__ void subsample(float *input, float *output, float *weight, float *bias, int input_n, int input_h, int input_w, int kH, int kW, int dH, int dW) { // iterators int xx, yy; // output size int output_w = (input_w - kW) / dW + 1; int output_h = (input_h - kH) / dH + 1; // compute offsets based on thread/block ID int o = blockIdx.x; int i = o; int k = blockIdx.x % input_n; int xx_start = threadIdx.x; int xx_end = output_w; int xx_step = blockDim.x; int yy_start = blockDim.y*blockIdx.y + threadIdx.y; int yy_end = output_h; int yy_step = blockDim.y*gridDim.y; // select input/output plane output = output + o*output_w*output_h; input = input + i*input_w*input_h; // Get the good mask for (k,i) (k out, i in) float the_weight = weight[k]; // Initialize to the bias float the_bias = bias[k]; // For all output pixels... for(yy = yy_start; yy < yy_end; yy+=yy_step) { for(xx = xx_start; xx < xx_end; xx+=xx_step) { // Compute the mean of the input image... float *ptr_input = input + yy*dH*input_w + xx*dW; float *ptr_output = output + yy*output_w + xx; float sum = 0; int kx, ky; for(ky = 0; ky < kH; ky++) { for(kx = 0; kx < kW; kx++) sum += ptr_input[kx]; ptr_input += input_w; // next input line } // Update output *ptr_output = the_weight*sum + the_bias; } } } /* * Description: * this function computes the gradWeight from input and gradOutput */ __global__ void subgradweight(float *input, float *gradOutput, float *gradWeight, float *gradBias, int input_n, int input_h, int input_w, int kH, int kW, int dH, int dW, float scale) { // iterators int xx, yy; // output size int output_w = (input_w - kW) / dW + 1; int output_h = (input_h - kH) / dH + 1; // compute offsets based on thread/block ID int o = blockIdx.x; int i = o; int k = blockIdx.x % input_n; int xx_start = threadIdx.x; int xx_end = output_w; int xx_step = blockDim.x; int yy_start = threadIdx.y; int yy_end = output_h; int yy_step = blockDim.y; // select input/output plane gradOutput = gradOutput + o*output_w*output_h; input = input + i*input_w*input_h; // thread ID int tid = blockDim.x*threadIdx.y + threadIdx.x; // create array to hold partial sums __shared__ float sums[CUDA_MAX_THREADS]; sums[tid] = 0; // compute partial sums for(yy = yy_start; yy < yy_end; yy+=yy_step) { for(xx = xx_start; xx < xx_end; xx+=xx_step) { float *ptr_input = input + yy*dH*input_w + xx*dW; float *ptr_gradOutput = gradOutput + yy*output_w + xx; float z = *ptr_gradOutput; long kx, ky; for(ky = 0; ky < kH; ky++) { for(kx = 0; kx < kW; kx++) { sums[tid] += z * ptr_input[kx]; } ptr_input += input_w; } } } __syncthreads(); // reduce: accumulate all partial sums to produce final gradWeight if ((threadIdx.x == 0) && (threadIdx.y == 0)) { for(int i = 0; i < blockDim.x*blockDim.y; i++) gradWeight[k] += scale*sums[i]; } __syncthreads(); // compute gradBias sums[tid] = 0; for (int i=tid; i<output_w*output_h; i+=(blockDim.x*blockDim.y)) { sums[tid] += gradOutput[i]; } __syncthreads(); // reduce gradBias if ((threadIdx.x == 0) && (threadIdx.y == 0)) { for (int i=0; i<(blockDim.x*blockDim.y); i++) gradBias[k] += scale*sums[i]; } } /* * Description: * this function computes the gradInput from weight and gradOutput */ __global__ void subgradinput(float *gradInput, float *gradOutput, float *weight, int input_n, int input_h, int input_w, 
int kH, int kW, int dH, int dW) { // iterators int xx, yy; // output size int output_w = (input_w - kW) / dW + 1; int output_h = (input_h - kH) / dH + 1; // compute offsets based on thread/block ID int o = blockIdx.x; int i = o; int k = blockIdx.x % input_n; int xx_start = threadIdx.x; int xx_end = output_w; int xx_step = blockDim.x; int yy_start = blockDim.y*blockIdx.y + threadIdx.y; int yy_end = output_h; int yy_step = blockDim.y*gridDim.y; // select input/output plane gradOutput = gradOutput + o*output_w*output_h; gradInput = gradInput + i*input_w*input_h; // get weight float the_weight = weight[k]; // compute gradInput for(yy = yy_start; yy < yy_end; yy+=yy_step) { for(xx = xx_start; xx < xx_end; xx+=xx_step) { float *ptr_gradInput = gradInput + yy*dH*input_w + xx*dW; float *ptr_gradOutput = gradOutput + yy*output_w + xx; float z = *ptr_gradOutput * the_weight; int kx, ky; for(ky = 0; ky < kH; ky++) { for(kx = 0; kx < kW; kx++) ptr_gradInput[kx] += z; ptr_gradInput += input_w; } } } } /* * Description: * this function computes the gradInput from weight and gradOutput */ __global__ void subgradinputAtomic(float *gradInput, float *gradOutput, float *weight, int input_n, int input_h, int input_w, int kH, int kW, int dH, int dW) { // iterators int xx, yy; // output size int output_w = (input_w - kW) / dW + 1; int output_h = (input_h - kH) / dH + 1; // compute offsets based on thread/block ID int o = blockIdx.x; int i = o; int k = blockIdx.x % input_n; int xx_start = threadIdx.x; int xx_end = output_w; int xx_step = blockDim.x; int yy_start = blockDim.y*blockIdx.y + threadIdx.y; int yy_end = output_h; int yy_step = blockDim.y*gridDim.y; // select input/output plane gradOutput = gradOutput + o*output_w*output_h; gradInput = gradInput + i*input_w*input_h; // get weight float the_weight = weight[k]; // compute gradInput for(yy = yy_start; yy < yy_end; yy+=yy_step) { for(xx = xx_start; xx < xx_end; xx+=xx_step) { float *ptr_gradInput = gradInput + yy*dH*input_w + xx*dW; float *ptr_gradOutput = gradOutput + yy*output_w + xx; float z = *ptr_gradOutput * the_weight; int kx, ky; for(ky = 0; ky < kH; ky++) { for(kx = 0; kx < kW; kx++) { atomicAdd(&(ptr_gradInput[kx]), z); } ptr_gradInput += input_w; } } } } void THNN_CudaSpatialSubSampling_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output, THCudaTensor *weight, THCudaTensor *bias, int kW, int kH, int dW, int dH) { float *weight_data = THCudaTensor_data(state, weight); float *bias_data = THCudaTensor_data(state, bias); float *output_data; float *input_data; int nInputPlane = THCudaTensor_size(state, weight, 0); THCUNN_assertSameGPU(state, 4, input, output, weight, bias); THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch) tensor expected"); if (input->nDimension == 3) { long nInputCols = input->size[2]; long nInputRows = input->size[1]; long nOutputCols = (nInputCols - kW) / dW + 1; long nOutputRows = (nInputRows - kH) / dH + 1; THArgCheck(input->size[0] == nInputPlane, 2, "invalid number of input planes"); THArgCheck(nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size"); input = THCudaTensor_newContiguous(state, input); input_data = THCudaTensor_data(state, input); THCudaTensor_resize3d(state, output, nInputPlane, nOutputRows, nOutputCols); output_data = THCudaTensor_data(state, output); // cuda blocks & threads: int yblocks = (int)(16L / nInputPlane); yblocks = yblocks < 1 ? 
1 : yblocks; dim3 blocks(nInputPlane,yblocks); dim3 threads(32,8); // run subsample kernel subsample <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> ( input_data, output_data, weight_data, bias_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW); } else { long nInputCols = input->size[3]; long nInputRows = input->size[2]; long nbatch = input->size[0]; long nOutputCols = (nInputCols - kW) / dW + 1; long nOutputRows = (nInputRows - kH) / dH + 1; THArgCheck(input->size[1] == nInputPlane, 2, "invalid number of input planes"); THArgCheck(nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size"); input = THCudaTensor_newContiguous(state, input); input_data = THCudaTensor_data(state, input); THCudaTensor_resize4d(state, output, nbatch, nInputPlane, nOutputRows, nOutputCols); output_data = THCudaTensor_data(state, output); // cuda blocks & threads: int yblocks = (int)(16L / nInputPlane); yblocks = yblocks < 1 ? 1 : yblocks; dim3 blocks(nInputPlane*nbatch,yblocks); dim3 threads(32,8); // run subsample kernel subsample <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> ( input_data, output_data, weight_data, bias_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW); } // clean THCudaTensor_free(state, input); // check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in SpatialSubsampling.updateOutput: %s\n", cudaGetErrorString(err)); THError("aborting"); } } void THNN_CudaSpatialSubSampling_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput, THCudaTensor *weight, int kW, int kH, int dW, int dH) { THCUNN_assertSameGPU(state, 4, input, gradOutput, weight, gradInput); int nInputPlane = THCudaTensor_size(state, weight, 0); if (input->nDimension == 3) { long nInputCols = input->size[2]; long nInputRows = input->size[1]; float *weight_data = THCudaTensor_data(state, weight); float *gradOutput_data = THCudaTensor_data(state, gradOutput); float *gradInput_data; THCudaTensor_resizeAs(state, gradInput, input); THCudaTensor_zero(state, gradInput); gradInput_data = THCudaTensor_data(state, gradInput); // cuda blocks & threads: int yblocks = (int)(16L / nInputPlane); yblocks = yblocks < 1 ? 1 : yblocks; dim3 blocks(nInputPlane,yblocks); dim3 threads(32,8); // run updateGradInput kernel if (kH <= dH && kW <= dW) { subgradinput <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> ( gradInput_data, gradOutput_data, weight_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW); } else { subgradinputAtomic <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> ( gradInput_data, gradOutput_data, weight_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW); } } else { long nInputCols = input->size[3]; long nInputRows = input->size[2]; long nbatch = input->size[0]; float *weight_data = THCudaTensor_data(state, weight); float *gradOutput_data = THCudaTensor_data(state, gradOutput); float *gradInput_data; THCudaTensor_resizeAs(state, gradInput, input); THCudaTensor_zero(state, gradInput); gradInput_data = THCudaTensor_data(state, gradInput); // cuda blocks & threads: int yblocks = (int)(16L / nInputPlane); yblocks = yblocks < 1 ? 
1 : yblocks; dim3 blocks(nInputPlane*nbatch,yblocks); dim3 threads(32,8); // run updateGradInput kernel if (kH <= dH && kW <= dW) { subgradinput <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> ( gradInput_data, gradOutput_data, weight_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW); } else { subgradinputAtomic <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> ( gradInput_data, gradOutput_data, weight_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW); } } // check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in SpatialSubsampling.updateGradInput: %s\n", cudaGetErrorString(err)); THError("aborting"); } } void THNN_CudaSpatialSubSampling_accGradParameters(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradWeight, THCudaTensor *gradBias, int kW, int kH, int dW, int dH, float scale) { THCUNN_assertSameGPU(state, 4, input, gradOutput, gradWeight, gradBias); int nInputPlane = THCudaTensor_size(state, gradWeight, 0); if (input->nDimension == 3) { long nInputCols = input->size[2]; long nInputRows = input->size[1]; float *gradWeight_data = THCudaTensor_data(state, gradWeight); float *gradBias_data = THCudaTensor_data(state, gradBias); float *gradOutput_data = THCudaTensor_data(state, gradOutput); float *input_data; input = THCudaTensor_newContiguous(state, input); input_data = THCudaTensor_data(state, input); // cuda blocks & threads: dim3 blocks(nInputPlane); dim3 threads(32,8); // run gradweight kernel subgradweight <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> ( input_data, gradOutput_data, gradWeight_data, gradBias_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW, scale); } else { long nInputCols = input->size[3]; long nInputRows = input->size[2]; long nbatch = input->size[0]; float *gradWeight_data = THCudaTensor_data(state, gradWeight); float *gradBias_data = THCudaTensor_data(state, gradBias); float *gradOutput_data = THCudaTensor_data(state, gradOutput); float *input_data; input = THCudaTensor_newContiguous(state, input); input_data = THCudaTensor_data(state, input); // cuda blocks & threads: dim3 blocks(nInputPlane); dim3 threads(32,8); // run gradweight kernel long sl; for (sl=0; sl<nbatch; sl++) { subgradweight <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> ( input_data + sl*input->stride[0], gradOutput_data + sl*gradOutput->stride[0], gradWeight_data, gradBias_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW, scale); } } // clean THCudaTensor_free(state, input); // check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in SpatialSubsampling.accGradParameters: %s\n", cudaGetErrorString(err)); THError("aborting"); } } #undef CUDA_MAX_THREADS
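/* Editor's note: illustrative host-side sketch, not part of the THCUNN sources
   above. All three subsampling kernels derive the output extent as
   (input - kernel) / stride + 1, and updateOutput launches a grid of
   nInputPlane (or nInputPlane*nbatch) by yblocks blocks of 32x8 threads.
   The helper name subsample_output_size is hypothetical; it only restates
   that arithmetic. */
static inline long subsample_output_size(long inputSize, int k, int d)
{
    // e.g. inputSize = 32, k = 2, d = 2  ->  (32 - 2) / 2 + 1 = 16
    return (inputSize - k) / d + 1;
}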
b2fd25e38610c2792800985b57f45a7e1c03c5ad.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include "vector.h" // includes, project #include <cutil.h> #include "Intersection_K.h" __device__ float dot_product(float *v1, float *v2) { return v1[0] * v2[0] + v1[1] * v2[1] + v1[2] * v2[2]; } __device__ float det3(float **v1) { return v1[0][0] * v1[1][1] * v1[2][2] + v1[0][1] * v1[1][2] * v1[2][0] + v1[0][2] * v1[1][0] * v1[2][1] - v1[0][2] * v1[1][1] * v1[2][0] - v1[0][0] * v1[1][2] * v1[2][1] - v1[0][1] * v1[1][0] * v1[2][2]; ; } __device__ float hit(ray_dev_t ray, triangle_dev_t *tri); { float result = -1; float bBeta, bGamma, bT; float detA; float A[3][3]; A[0][0] = tri->c1[0]-tri->c2[0]; A[0][1] = tri->c1[0]-tri->c3[0]; A[0][2] = ray.dir[0]; A[1][0] = tri->c1[1]-tri->c2[1]; A[1][1] = tri->c1[1]-tri->c3[1]; A[1][2] = ray.dir[1]; A[2][0] = tri->c1[2]-tri->c2[2]; A[2][1] = tri->c1[2]-tri->c3[2]; A[2][2] = ray.dir[2]; detA = det3(A); float baryT[3][3]; baryT[0][0] = tri->c1[0]-tri->c2[0]; baryT[0][1] = tri->c1[0]-tri->c3[0]; baryT[0][2] = tri->c1[0]-ray.point[0]; baryT[1][0] = tri->c1[1]-tri->c2[1]; baryT[1][1] = tri->c1[1]-tri->c3[1]; baryT[1][2] = tri->c1[1]-ray.point[1]; baryT[2][0] = tri->c1[2]-tri->c2[2]; baryT[2][1] = tri->c1[2]-tri->c3[2]; baryT[2][2] = tri->c1[2]-ray.point[2]; bT = det3(baryT) / detA; if (bT < 0) { result = 0; } else { float baryGamma[3][3]; baryGamma[0][0] = tri->c1[0]-tri->c2[0]; baryGamma[0][1] = tri->c1[0]-ray.point[0]; baryGamma[0][2] = ray.dir[0]; baryGamma[1][0] = tri->c1[1]-tri->c2[1]; baryGamma[1][1] = tri->c1[1]-ray.point[1]; baryGamma[1][2] = ray.dir[1]; baryGamma[2][0] = tri->c1[2]-tri->c2[2]; baryGamma[2][2] = tri->c1[2]-ray.point[2]; baryGamma[2][3] = ray.dir[2]; bGamma = det3(baryGamma) / detA; if (bGamma < 0 || bGamma > 1) { result = 0; } else { float baryBeta[3][3]; baryBeta[0][0] = tri->c1[0]-ray.point[0]; baryBeta[0][1] = tri->c1[0]-tri->c3[0]; baryBeta[0][2] = ray.dir[0]; baryBeta[1][0] = tri->c1[1]-ray.point[1]; baryBeta[1][1] = tri->c1[1]-tri->c3[1]; baryBeta[1][2] = ray.dir[1]; baryBeta[2][0] = tri->c1[2]-ray.point[2]; baryBeta[2][1] = tri->c1[2]-tri->c3[2]; baryBeta[2][2] = ray.dir[2]; bBeta = det3(baryBeta) / detA; if (bBeta < 0 || bBeta > 1 - bGamma) { result = 0; } } } if (result != 0) { result = bT; } return (result); } __device__ float hit(ray_dev_t ray, sphere_dev_t *sp) { float oMinusC[3]; oMinusC[0] = ray.point[0] - sp->point[0]; oMinusC[1] = ray.point[1] - sp->point[1]; oMinusC[2] = ray.point[2] - sp->point[2]; float _b = dot_product(ray.dir,oMinusC); float _c = dot_product(oMinusC,oMinusC) - (sp->rad * sp->rad); float det = _b * _b - _c; if (det < 0) { return -1; } float t0 = -_b - (float)sqrt(det); float t1 = -_b + (float)sqrt(det); if (t0 >= 0 && t1 >= 0.0) { if (t0 < t1) { return t0; } else if (t1 < t0) { return t1; } } else if (t0 >= 0) { return t0; } else if (t1 >= 0) { return t1; } return -1; } __global__ void Intersection_Test(ray_dev_t *rays, sphere_dev_t *spheres, intersection_t *intersects, int size, int width, int height) { float t; bool hitFound = false; float curDepth = -1.0; int x = blockIdx.x * BLOCK_SIZE + threadIdx.x; int y = blockIdx.y * BLOCK_SIZE + threadIdx.y; if(x > width || y > height) return; ray_dev_t *ray = rays + x * height + y; intersection_t *result = intersects + x * height + y; result->index = -1; for (int i = 0; i < size; i++) { sphere_dev_t *curObject = &spheres[i]; t = -1.0; t = hit(*ray, curObject); if (t >= 
0) {
         if (t >= 0 && (!hitFound || (hitFound && t < curDepth))) {
            curDepth = t;
            result->index = i;
         }
      }
      hitFound |= (t >= 0 && curDepth > 0.0);
   }

   // Add intersection
   result->t = curDepth;
}
b2fd25e38610c2792800985b57f45a7e1c03c5ad.cu
// includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include "vector.h" // includes, project #include <cutil.h> #include "Intersection_K.h" __device__ float dot_product(float *v1, float *v2) { return v1[0] * v2[0] + v1[1] * v2[1] + v1[2] * v2[2]; } __device__ float det3(float **v1) { return v1[0][0] * v1[1][1] * v1[2][2] + v1[0][1] * v1[1][2] * v1[2][0] + v1[0][2] * v1[1][0] * v1[2][1] - v1[0][2] * v1[1][1] * v1[2][0] - v1[0][0] * v1[1][2] * v1[2][1] - v1[0][1] * v1[1][0] * v1[2][2]; ; } __device__ float hit(ray_dev_t ray, triangle_dev_t *tri); { float result = -1; float bBeta, bGamma, bT; float detA; float A[3][3]; A[0][0] = tri->c1[0]-tri->c2[0]; A[0][1] = tri->c1[0]-tri->c3[0]; A[0][2] = ray.dir[0]; A[1][0] = tri->c1[1]-tri->c2[1]; A[1][1] = tri->c1[1]-tri->c3[1]; A[1][2] = ray.dir[1]; A[2][0] = tri->c1[2]-tri->c2[2]; A[2][1] = tri->c1[2]-tri->c3[2]; A[2][2] = ray.dir[2]; detA = det3(A); float baryT[3][3]; baryT[0][0] = tri->c1[0]-tri->c2[0]; baryT[0][1] = tri->c1[0]-tri->c3[0]; baryT[0][2] = tri->c1[0]-ray.point[0]; baryT[1][0] = tri->c1[1]-tri->c2[1]; baryT[1][1] = tri->c1[1]-tri->c3[1]; baryT[1][2] = tri->c1[1]-ray.point[1]; baryT[2][0] = tri->c1[2]-tri->c2[2]; baryT[2][1] = tri->c1[2]-tri->c3[2]; baryT[2][2] = tri->c1[2]-ray.point[2]; bT = det3(baryT) / detA; if (bT < 0) { result = 0; } else { float baryGamma[3][3]; baryGamma[0][0] = tri->c1[0]-tri->c2[0]; baryGamma[0][1] = tri->c1[0]-ray.point[0]; baryGamma[0][2] = ray.dir[0]; baryGamma[1][0] = tri->c1[1]-tri->c2[1]; baryGamma[1][1] = tri->c1[1]-ray.point[1]; baryGamma[1][2] = ray.dir[1]; baryGamma[2][0] = tri->c1[2]-tri->c2[2]; baryGamma[2][2] = tri->c1[2]-ray.point[2]; baryGamma[2][3] = ray.dir[2]; bGamma = det3(baryGamma) / detA; if (bGamma < 0 || bGamma > 1) { result = 0; } else { float baryBeta[3][3]; baryBeta[0][0] = tri->c1[0]-ray.point[0]; baryBeta[0][1] = tri->c1[0]-tri->c3[0]; baryBeta[0][2] = ray.dir[0]; baryBeta[1][0] = tri->c1[1]-ray.point[1]; baryBeta[1][1] = tri->c1[1]-tri->c3[1]; baryBeta[1][2] = ray.dir[1]; baryBeta[2][0] = tri->c1[2]-ray.point[2]; baryBeta[2][1] = tri->c1[2]-tri->c3[2]; baryBeta[2][2] = ray.dir[2]; bBeta = det3(baryBeta) / detA; if (bBeta < 0 || bBeta > 1 - bGamma) { result = 0; } } } if (result != 0) { result = bT; } return (result); } __device__ float hit(ray_dev_t ray, sphere_dev_t *sp) { float oMinusC[3]; oMinusC[0] = ray.point[0] - sp->point[0]; oMinusC[1] = ray.point[1] - sp->point[1]; oMinusC[2] = ray.point[2] - sp->point[2]; float _b = dot_product(ray.dir,oMinusC); float _c = dot_product(oMinusC,oMinusC) - (sp->rad * sp->rad); float det = _b * _b - _c; if (det < 0) { return -1; } float t0 = -_b - (float)sqrt(det); float t1 = -_b + (float)sqrt(det); if (t0 >= 0 && t1 >= 0.0) { if (t0 < t1) { return t0; } else if (t1 < t0) { return t1; } } else if (t0 >= 0) { return t0; } else if (t1 >= 0) { return t1; } return -1; } __global__ void Intersection_Test(ray_dev_t *rays, sphere_dev_t *spheres, intersection_t *intersects, int size, int width, int height) { float t; bool hitFound = false; float curDepth = -1.0; int x = blockIdx.x * BLOCK_SIZE + threadIdx.x; int y = blockIdx.y * BLOCK_SIZE + threadIdx.y; if(x > width || y > height) return; ray_dev_t *ray = rays + x * height + y; intersection_t *result = intersects + x * height + y; result->index = -1; for (int i = 0; i < size; i++) { sphere_dev_t *curObject = &spheres[i]; t = -1.0; t = hit(*ray, curObject); if (t >= 0) { if (t >= 0 && (!hitFound || (hitFound && t < curDepth))) { curDepth = t; 
result->index = i;
         }
      }
      hitFound |= (t >= 0 && curDepth > 0.0);
   }

   // Add intersection
   result->t = curDepth;
}
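/* Editor's note on the ray/triangle `hit` overload above (it appears in both
   the .hip and .cu copies of this file): as written it has three problems.
   A stray semicolon sits between the function signature and its body, the
   `det3(float **v1)` helper is called with `float A[3][3]` (incompatible
   types, so the call does not compile), and the third row of baryGamma is
   filled at [2][2]/[2][3] instead of [2][1]/[2][2], where [2][3] is out of
   bounds. A minimal corrected sketch of the determinant helper, keeping the
   original cofactor expansion (the name det3_fixed is hypothetical): */
__device__ float det3_fixed(const float m[3][3])
{
    return m[0][0]*m[1][1]*m[2][2] + m[0][1]*m[1][2]*m[2][0] + m[0][2]*m[1][0]*m[2][1]
         - m[0][2]*m[1][1]*m[2][0] - m[0][0]*m[1][2]*m[2][1] - m[0][1]*m[1][0]*m[2][2];
}
/* ...and the intended third row of baryGamma, following the pattern of its
   first two rows (second column of A replaced by c1 - ray origin):
      baryGamma[2][0] = tri->c1[2] - tri->c2[2];
      baryGamma[2][1] = tri->c1[2] - ray.point[2];
      baryGamma[2][2] = ray.dir[2];                                        */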
d005c6fac5d79b37a2bdb1c3a6be3f55e477de45.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @generated from magmablas/zlaswp.cu, normal z -> c, Thu Oct 8 23:05:34 2020 @author Stan Tomov @author Mathieu Faverge @author Ichitaro Yamazaki @author Mark Gates */ #include "magma_internal.h" // MAX_PIVOTS is maximum number of pivots to apply in each kernel launch // NTHREADS is number of threads in a block // 64 and 256 are better on Kepler; //#define MAX_PIVOTS 64 //#define NTHREADS 256 #define MAX_PIVOTS 32 #define NTHREADS 64 typedef struct { int npivots; int ipiv[MAX_PIVOTS]; } claswp_params_t; // Matrix A is stored row-wise in dAT. // Divide matrix A into block-columns of NTHREADS columns each. // Each GPU block processes one block-column of A. // Each thread goes down a column of A, // swapping rows according to pivots stored in params. __global__ void claswp_kernel( int n, magmaFloatComplex *dAT, int ldda, claswp_params_t params ) { int tid = threadIdx.x + blockDim.x*blockIdx.x; if ( tid < n ) { dAT += tid; magmaFloatComplex *A1 = dAT; for( int i1 = 0; i1 < params.npivots; ++i1 ) { int i2 = params.ipiv[i1]; magmaFloatComplex *A2 = dAT + i2*ldda; magmaFloatComplex temp = *A1; *A1 = *A2; *A2 = temp; A1 += ldda; // A1 = dA + i1*ldx } } } /***************************************************************************//** Purpose: ============= CLASWP performs a series of row interchanges on the matrix A. One row interchange is initiated for each of rows K1 through K2 of A. ** Unlike LAPACK, here A is stored row-wise (hence dAT). ** Otherwise, this is identical to LAPACK's interface. Arguments: ========== @param[in] n INTEGER The number of columns of the matrix A. @param[in,out] dAT COMPLEX array on GPU, stored row-wise, dimension (LDDA,M) The M-by-N matrix, stored transposed as N-by-M matrix embedded in LDDA-by-M array. M is not given; it is implicit. On entry, the matrix of column dimension N to which the row interchanges will be applied. On exit, the permuted matrix. @param[in] ldda INTEGER The leading dimension of the array A. ldda >= n. @param[in] k1 INTEGER The first element of IPIV for which a row interchange will be done. (Fortran one-based index: 1 <= k1.) @param[in] k2 INTEGER The last element of IPIV for which a row interchange will be done. (Fortran one-based index: 1 <= k2.) @param[in] ipiv INTEGER array, on CPU, dimension (K2*abs(INCI)) The vector of pivot indices. Only the elements in positions K1 through K2 of IPIV are accessed. IPIV(K) = L implies rows K and L are to be interchanged. @param[in] inci INTEGER The increment between successive values of IPIV. Currently, INCI > 0. TODO: If INCI is negative, the pivots are applied in reverse order. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_laswp *******************************************************************************/ // used in cgessm, cgetrf_incpiv. 
extern "C" void magmablas_claswp( magma_int_t n, magmaFloatComplex_ptr dAT, magma_int_t ldda, magma_int_t k1, magma_int_t k2, const magma_int_t *ipiv, magma_int_t inci, magma_queue_t queue ) { #define dAT(i_, j_) (dAT + (i_)*ldda + (j_)) magma_int_t info = 0; if ( n < 0 ) info = -1; else if ( n > ldda ) info = -3; else if ( k1 < 1 ) info = -4; else if ( k2 < 1 ) info = -5; else if ( inci <= 0 ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } dim3 threads( NTHREADS ); dim3 grid( magma_ceildiv( n, NTHREADS ) ); claswp_params_t params; for( int k = k1-1; k < k2; k += MAX_PIVOTS ) { int npivots = min( MAX_PIVOTS, k2-k ); params.npivots = npivots; for( int j = 0; j < npivots; ++j ) { params.ipiv[j] = ipiv[(k+j)*inci] - k - 1; } hipLaunchKernelGGL(( claswp_kernel) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dAT(k,0), ldda, params ); } #undef dAT } /******************************************************************************/ // Extended version has stride in both directions (ldx, ldy) // to handle both row-wise and column-wise storage. // Matrix A is stored row or column-wise in dA. // Divide matrix A into block-columns of NTHREADS columns each. // Each GPU block processes one block-column of A. // Each thread goes down a column of A, // swapping rows according to pivots stored in params. __global__ void claswpx_kernel( int n, magmaFloatComplex *dA, int ldx, int ldy, claswp_params_t params ) { int tid = threadIdx.x + blockDim.x*blockIdx.x; if ( tid < n ) { dA += tid*ldy; magmaFloatComplex *A1 = dA; for( int i1 = 0; i1 < params.npivots; ++i1 ) { int i2 = params.ipiv[i1]; magmaFloatComplex *A2 = dA + i2*ldx; magmaFloatComplex temp = *A1; *A1 = *A2; *A2 = temp; A1 += ldx; // A1 = dA + i1*ldx } } } /***************************************************************************//** Purpose: ============= CLASWPX performs a series of row interchanges on the matrix A. One row interchange is initiated for each of rows K1 through K2 of A. ** Unlike LAPACK, here A is stored either row-wise or column-wise, depending on ldx and ldy. ** Otherwise, this is identical to LAPACK's interface. Arguments: ========== @param[in] n INTEGER The number of columns of the matrix A. @param[in,out] dA COMPLEX array on GPU, dimension (*,*) On entry, the matrix of column dimension N to which the row interchanges will be applied. On exit, the permuted matrix. @param[in] ldx INTEGER Stride between elements in same column. @param[in] ldy INTEGER Stride between elements in same row. For A stored row-wise, set ldx=ldda and ldy=1. For A stored column-wise, set ldx=1 and ldy=ldda. @param[in] k1 INTEGER The first element of IPIV for which a row interchange will be done. (One based index.) @param[in] k2 INTEGER The last element of IPIV for which a row interchange will be done. (One based index.) @param[in] ipiv INTEGER array, on CPU, dimension (K2*abs(INCI)) The vector of pivot indices. Only the elements in positions K1 through K2 of IPIV are accessed. IPIV(K) = L implies rows K and L are to be interchanged. @param[in] inci INTEGER The increment between successive values of IPIV. Currently, IPIV > 0. TODO: If IPIV is negative, the pivots are applied in reverse order. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_laswp *******************************************************************************/ extern "C" void magmablas_claswpx( magma_int_t n, magmaFloatComplex_ptr dA, magma_int_t ldx, magma_int_t ldy, magma_int_t k1, magma_int_t k2, const magma_int_t *ipiv, magma_int_t inci, magma_queue_t queue ) { #define dA(i_, j_) (dA + (i_)*ldx + (j_)*ldy) magma_int_t info = 0; if ( n < 0 ) info = -1; else if ( k1 < 0 ) info = -4; else if ( k2 < 0 || k2 < k1 ) info = -5; else if ( inci <= 0 ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } dim3 threads( NTHREADS ); dim3 grid( magma_ceildiv( n, NTHREADS ) ); claswp_params_t params; for( int k = k1-1; k < k2; k += MAX_PIVOTS ) { int npivots = min( MAX_PIVOTS, k2-k ); params.npivots = npivots; for( int j = 0; j < npivots; ++j ) { params.ipiv[j] = ipiv[(k+j)*inci] - k - 1; } hipLaunchKernelGGL(( claswpx_kernel) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA(k,0), ldx, ldy, params ); } #undef dA } /******************************************************************************/ // This version takes d_ipiv on the GPU. Thus it does not pass pivots // as an argument using a structure, avoiding all the argument size // limitations of CUDA and OpenCL. It also needs just one kernel launch // with all the pivots, instead of multiple kernel launches with small // batches of pivots. On Fermi, it is faster than magmablas_claswp // (including copying pivots to the GPU). __global__ void claswp2_kernel( int n, magmaFloatComplex *dAT, int ldda, int npivots, const magma_int_t *d_ipiv, int inci ) { int tid = threadIdx.x + blockDim.x*blockIdx.x; if ( tid < n ) { dAT += tid; magmaFloatComplex *A1 = dAT; for( int i1 = 0; i1 < npivots; ++i1 ) { int i2 = d_ipiv[i1*inci] - 1; // Fortran index magmaFloatComplex *A2 = dAT + i2*ldda; magmaFloatComplex temp = *A1; *A1 = *A2; *A2 = temp; A1 += ldda; // A1 = dA + i1*ldx } } } /***************************************************************************//** Purpose: ============= CLASWP2 performs a series of row interchanges on the matrix A. One row interchange is initiated for each of rows K1 through K2 of A. ** Unlike LAPACK, here A is stored row-wise (hence dAT). ** Otherwise, this is identical to LAPACK's interface. Here, d_ipiv is passed in GPU memory. Arguments: ========== @param[in] n INTEGER The number of columns of the matrix A. @param[in,out] dAT COMPLEX array on GPU, stored row-wise, dimension (LDDA,*) On entry, the matrix of column dimension N to which the row interchanges will be applied. On exit, the permuted matrix. @param[in] ldda INTEGER The leading dimension of the array A. (I.e., stride between elements in a column.) @param[in] k1 INTEGER The first element of IPIV for which a row interchange will be done. (One based index.) @param[in] k2 INTEGER The last element of IPIV for which a row interchange will be done. (One based index.) @param[in] d_ipiv INTEGER array, on GPU, dimension (K2*abs(INCI)) The vector of pivot indices. Only the elements in positions K1 through K2 of IPIV are accessed. IPIV(K) = L implies rows K and L are to be interchanged. @param[in] inci INTEGER The increment between successive values of IPIV. Currently, IPIV > 0. TODO: If IPIV is negative, the pivots are applied in reverse order. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_laswp *******************************************************************************/ extern "C" void magmablas_claswp2( magma_int_t n, magmaFloatComplex_ptr dAT, magma_int_t ldda, magma_int_t k1, magma_int_t k2, magmaInt_const_ptr d_ipiv, magma_int_t inci, magma_queue_t queue ) { #define dAT(i_, j_) (dAT + (i_)*ldda + (j_)) magma_int_t info = 0; if ( n < 0 ) info = -1; else if ( k1 < 0 ) info = -4; else if ( k2 < 0 || k2 < k1 ) info = -5; else if ( inci <= 0 ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } magma_int_t nb = k2-(k1-1); dim3 threads( NTHREADS ); dim3 grid( magma_ceildiv( n, NTHREADS ) ); hipLaunchKernelGGL(( claswp2_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dAT(k1-1,0), ldda, nb, d_ipiv, inci ); }
d005c6fac5d79b37a2bdb1c3a6be3f55e477de45.cu
/* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @generated from magmablas/zlaswp.cu, normal z -> c, Thu Oct 8 23:05:34 2020 @author Stan Tomov @author Mathieu Faverge @author Ichitaro Yamazaki @author Mark Gates */ #include "magma_internal.h" // MAX_PIVOTS is maximum number of pivots to apply in each kernel launch // NTHREADS is number of threads in a block // 64 and 256 are better on Kepler; //#define MAX_PIVOTS 64 //#define NTHREADS 256 #define MAX_PIVOTS 32 #define NTHREADS 64 typedef struct { int npivots; int ipiv[MAX_PIVOTS]; } claswp_params_t; // Matrix A is stored row-wise in dAT. // Divide matrix A into block-columns of NTHREADS columns each. // Each GPU block processes one block-column of A. // Each thread goes down a column of A, // swapping rows according to pivots stored in params. __global__ void claswp_kernel( int n, magmaFloatComplex *dAT, int ldda, claswp_params_t params ) { int tid = threadIdx.x + blockDim.x*blockIdx.x; if ( tid < n ) { dAT += tid; magmaFloatComplex *A1 = dAT; for( int i1 = 0; i1 < params.npivots; ++i1 ) { int i2 = params.ipiv[i1]; magmaFloatComplex *A2 = dAT + i2*ldda; magmaFloatComplex temp = *A1; *A1 = *A2; *A2 = temp; A1 += ldda; // A1 = dA + i1*ldx } } } /***************************************************************************//** Purpose: ============= CLASWP performs a series of row interchanges on the matrix A. One row interchange is initiated for each of rows K1 through K2 of A. ** Unlike LAPACK, here A is stored row-wise (hence dAT). ** Otherwise, this is identical to LAPACK's interface. Arguments: ========== @param[in] n INTEGER The number of columns of the matrix A. @param[in,out] dAT COMPLEX array on GPU, stored row-wise, dimension (LDDA,M) The M-by-N matrix, stored transposed as N-by-M matrix embedded in LDDA-by-M array. M is not given; it is implicit. On entry, the matrix of column dimension N to which the row interchanges will be applied. On exit, the permuted matrix. @param[in] ldda INTEGER The leading dimension of the array A. ldda >= n. @param[in] k1 INTEGER The first element of IPIV for which a row interchange will be done. (Fortran one-based index: 1 <= k1.) @param[in] k2 INTEGER The last element of IPIV for which a row interchange will be done. (Fortran one-based index: 1 <= k2.) @param[in] ipiv INTEGER array, on CPU, dimension (K2*abs(INCI)) The vector of pivot indices. Only the elements in positions K1 through K2 of IPIV are accessed. IPIV(K) = L implies rows K and L are to be interchanged. @param[in] inci INTEGER The increment between successive values of IPIV. Currently, INCI > 0. TODO: If INCI is negative, the pivots are applied in reverse order. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_laswp *******************************************************************************/ // used in cgessm, cgetrf_incpiv. 
extern "C" void magmablas_claswp( magma_int_t n, magmaFloatComplex_ptr dAT, magma_int_t ldda, magma_int_t k1, magma_int_t k2, const magma_int_t *ipiv, magma_int_t inci, magma_queue_t queue ) { #define dAT(i_, j_) (dAT + (i_)*ldda + (j_)) magma_int_t info = 0; if ( n < 0 ) info = -1; else if ( n > ldda ) info = -3; else if ( k1 < 1 ) info = -4; else if ( k2 < 1 ) info = -5; else if ( inci <= 0 ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } dim3 threads( NTHREADS ); dim3 grid( magma_ceildiv( n, NTHREADS ) ); claswp_params_t params; for( int k = k1-1; k < k2; k += MAX_PIVOTS ) { int npivots = min( MAX_PIVOTS, k2-k ); params.npivots = npivots; for( int j = 0; j < npivots; ++j ) { params.ipiv[j] = ipiv[(k+j)*inci] - k - 1; } claswp_kernel <<< grid, threads, 0, queue->cuda_stream() >>> ( n, dAT(k,0), ldda, params ); } #undef dAT } /******************************************************************************/ // Extended version has stride in both directions (ldx, ldy) // to handle both row-wise and column-wise storage. // Matrix A is stored row or column-wise in dA. // Divide matrix A into block-columns of NTHREADS columns each. // Each GPU block processes one block-column of A. // Each thread goes down a column of A, // swapping rows according to pivots stored in params. __global__ void claswpx_kernel( int n, magmaFloatComplex *dA, int ldx, int ldy, claswp_params_t params ) { int tid = threadIdx.x + blockDim.x*blockIdx.x; if ( tid < n ) { dA += tid*ldy; magmaFloatComplex *A1 = dA; for( int i1 = 0; i1 < params.npivots; ++i1 ) { int i2 = params.ipiv[i1]; magmaFloatComplex *A2 = dA + i2*ldx; magmaFloatComplex temp = *A1; *A1 = *A2; *A2 = temp; A1 += ldx; // A1 = dA + i1*ldx } } } /***************************************************************************//** Purpose: ============= CLASWPX performs a series of row interchanges on the matrix A. One row interchange is initiated for each of rows K1 through K2 of A. ** Unlike LAPACK, here A is stored either row-wise or column-wise, depending on ldx and ldy. ** Otherwise, this is identical to LAPACK's interface. Arguments: ========== @param[in] n INTEGER The number of columns of the matrix A. @param[in,out] dA COMPLEX array on GPU, dimension (*,*) On entry, the matrix of column dimension N to which the row interchanges will be applied. On exit, the permuted matrix. @param[in] ldx INTEGER Stride between elements in same column. @param[in] ldy INTEGER Stride between elements in same row. For A stored row-wise, set ldx=ldda and ldy=1. For A stored column-wise, set ldx=1 and ldy=ldda. @param[in] k1 INTEGER The first element of IPIV for which a row interchange will be done. (One based index.) @param[in] k2 INTEGER The last element of IPIV for which a row interchange will be done. (One based index.) @param[in] ipiv INTEGER array, on CPU, dimension (K2*abs(INCI)) The vector of pivot indices. Only the elements in positions K1 through K2 of IPIV are accessed. IPIV(K) = L implies rows K and L are to be interchanged. @param[in] inci INTEGER The increment between successive values of IPIV. Currently, IPIV > 0. TODO: If IPIV is negative, the pivots are applied in reverse order. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_laswp *******************************************************************************/ extern "C" void magmablas_claswpx( magma_int_t n, magmaFloatComplex_ptr dA, magma_int_t ldx, magma_int_t ldy, magma_int_t k1, magma_int_t k2, const magma_int_t *ipiv, magma_int_t inci, magma_queue_t queue ) { #define dA(i_, j_) (dA + (i_)*ldx + (j_)*ldy) magma_int_t info = 0; if ( n < 0 ) info = -1; else if ( k1 < 0 ) info = -4; else if ( k2 < 0 || k2 < k1 ) info = -5; else if ( inci <= 0 ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } dim3 threads( NTHREADS ); dim3 grid( magma_ceildiv( n, NTHREADS ) ); claswp_params_t params; for( int k = k1-1; k < k2; k += MAX_PIVOTS ) { int npivots = min( MAX_PIVOTS, k2-k ); params.npivots = npivots; for( int j = 0; j < npivots; ++j ) { params.ipiv[j] = ipiv[(k+j)*inci] - k - 1; } claswpx_kernel <<< grid, threads, 0, queue->cuda_stream() >>> ( n, dA(k,0), ldx, ldy, params ); } #undef dA } /******************************************************************************/ // This version takes d_ipiv on the GPU. Thus it does not pass pivots // as an argument using a structure, avoiding all the argument size // limitations of CUDA and OpenCL. It also needs just one kernel launch // with all the pivots, instead of multiple kernel launches with small // batches of pivots. On Fermi, it is faster than magmablas_claswp // (including copying pivots to the GPU). __global__ void claswp2_kernel( int n, magmaFloatComplex *dAT, int ldda, int npivots, const magma_int_t *d_ipiv, int inci ) { int tid = threadIdx.x + blockDim.x*blockIdx.x; if ( tid < n ) { dAT += tid; magmaFloatComplex *A1 = dAT; for( int i1 = 0; i1 < npivots; ++i1 ) { int i2 = d_ipiv[i1*inci] - 1; // Fortran index magmaFloatComplex *A2 = dAT + i2*ldda; magmaFloatComplex temp = *A1; *A1 = *A2; *A2 = temp; A1 += ldda; // A1 = dA + i1*ldx } } } /***************************************************************************//** Purpose: ============= CLASWP2 performs a series of row interchanges on the matrix A. One row interchange is initiated for each of rows K1 through K2 of A. ** Unlike LAPACK, here A is stored row-wise (hence dAT). ** Otherwise, this is identical to LAPACK's interface. Here, d_ipiv is passed in GPU memory. Arguments: ========== @param[in] n INTEGER The number of columns of the matrix A. @param[in,out] dAT COMPLEX array on GPU, stored row-wise, dimension (LDDA,*) On entry, the matrix of column dimension N to which the row interchanges will be applied. On exit, the permuted matrix. @param[in] ldda INTEGER The leading dimension of the array A. (I.e., stride between elements in a column.) @param[in] k1 INTEGER The first element of IPIV for which a row interchange will be done. (One based index.) @param[in] k2 INTEGER The last element of IPIV for which a row interchange will be done. (One based index.) @param[in] d_ipiv INTEGER array, on GPU, dimension (K2*abs(INCI)) The vector of pivot indices. Only the elements in positions K1 through K2 of IPIV are accessed. IPIV(K) = L implies rows K and L are to be interchanged. @param[in] inci INTEGER The increment between successive values of IPIV. Currently, IPIV > 0. TODO: If IPIV is negative, the pivots are applied in reverse order. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_laswp *******************************************************************************/ extern "C" void magmablas_claswp2( magma_int_t n, magmaFloatComplex_ptr dAT, magma_int_t ldda, magma_int_t k1, magma_int_t k2, magmaInt_const_ptr d_ipiv, magma_int_t inci, magma_queue_t queue ) { #define dAT(i_, j_) (dAT + (i_)*ldda + (j_)) magma_int_t info = 0; if ( n < 0 ) info = -1; else if ( k1 < 0 ) info = -4; else if ( k2 < 0 || k2 < k1 ) info = -5; else if ( inci <= 0 ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } magma_int_t nb = k2-(k1-1); dim3 threads( NTHREADS ); dim3 grid( magma_ceildiv( n, NTHREADS ) ); claswp2_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( n, dAT(k1-1,0), ldda, nb, d_ipiv, inci ); }
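/* Editor's note: worked example of the pivot bookkeeping used by both
   magmablas_claswp and magmablas_claswpx above (illustrative only). Each
   MAX_PIVOTS chunk rebases the 1-based LAPACK pivots with
      params.ipiv[j] = ipiv[(k+j)*inci] - k - 1;
   i.e. an absolute Fortran row index becomes a 0-based row offset from the
   chunk base row k. Assuming inci = 1, k1 = 1, k2 = 3 and ipiv = {3, 3, 3}
   (swap each of rows 1..3 with row 3):
      k = 0:  params.ipiv = { 3-0-1, 3-0-1, 3-0-1 } = { 2, 2, 2 }
   so inside claswp_kernel, which is launched on dAT(k,0), A1 walks down rows
   0, 1, 2 of the chunk and each is exchanged with A2 = dAT + 2*ldda, exactly
   the row-3 swaps the host pivots describe. */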
caa78db28da3323671cf73b974055d93c2386335.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

__global__ void helloFromGPU()
{
    printf("Hello World from GPU thread %d!\n", threadIdx.x);
}

int main()
{
    printf("Hello World from CPU!\n");
    hipLaunchKernelGGL(helloFromGPU, dim3(1), dim3(10), 0, 0);
    hipDeviceReset();
    //hipDeviceSynchronize();
    // Without hipDeviceReset() or hipDeviceSynchronize() the kernel messages are not printed.
    // In addition, the .exe file handle is still held by malwarebytes... sometimes.
    // Maybe only after Malwarebytes has been running a long time.
    // Restarting Malwarebytes fixes things.
    return 0;
}
caa78db28da3323671cf73b974055d93c2386335.cu
#include <stdio.h>

__global__ void helloFromGPU()
{
    printf("Hello World from GPU thread %d!\n", threadIdx.x);
}

int main()
{
    printf("Hello World from CPU!\n");
    helloFromGPU<<<1, 10>>>();
    cudaDeviceReset();
    //cudaDeviceSynchronize();
    // Without cudaDeviceReset() or cudaDeviceSynchronize() the kernel messages are not printed.
    // In addition, the .exe file handle is still held by malwarebytes... sometimes.
    // Maybe only after Malwarebytes has been running a long time.
    // Restarting Malwarebytes fixes things.
    return 0;
}
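/* Editor's note: optional pattern, not part of the original file. Since the
   comment above explains that device printf output only appears once the
   device is synchronized or reset, an explicit launch check also makes launch
   failures visible. Sketch using only runtime calls already used elsewhere in
   this collection: */
// helloFromGPU<<<1, 10>>>();
// cudaError_t err = cudaGetLastError();
// if (err != cudaSuccess) printf("launch failed: %s\n", cudaGetErrorString(err));
// cudaDeviceSynchronize();   // also flushes the device-side printf buffer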
c68b1a00c426bb896b8249eeaa4f9eaed15c0189.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "common.h" #include "efficient.h" #include "RadixSort.h" namespace RadixSort { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } __global__ void kernGen_b_e_array(int N, int idxBit, int* b_array, int* e_array, const int *dev_data) { // TODO int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } int temp_result = (dev_data[index] >> idxBit) & 1; b_array[index] = temp_result; e_array[index] = 1 - temp_result; } template<int BLOCK_SIZE> __global__ void kern_Gen_d_array_and_scatter(int N, const int totalFalses, const int* b_array, const int* f_array, int* dev_data) { //Allocate appropriate shared memory __shared__ int tile[BLOCK_SIZE]; int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } tile[threadIdx.x] = dev_data[index]; __syncthreads(); int t_array_value = index - f_array[index] + totalFalses; int d_array_value = b_array[index] ? t_array_value : f_array[index]; dev_data[d_array_value] = tile[threadIdx.x]; } void sort(int n, int numOfBits, int *odata, const int *idata) { int* dev_data; int* b_array; int* e_array; int* f_array; int* host_f_array = new int[n]; dim3 blockDim(blockSize); dim3 gridDim((n + blockSize - 1) / blockSize); hipMalloc((void**)&dev_data, n * sizeof(int)); checkCUDAError("hipMalloc dev_data failed!"); hipMalloc((void**)&b_array, n * sizeof(int)); checkCUDAError("hipMalloc b_array failed!"); hipMalloc((void**)&e_array, n * sizeof(int)); checkCUDAError("hipMalloc e_array failed!"); hipMalloc((void**)&f_array, n * sizeof(int)); checkCUDAError("hipMalloc f_array failed!"); hipDeviceSynchronize(); hipMemcpy(dev_data, idata, sizeof(int) * n, hipMemcpyHostToDevice); checkCUDAError("RadixSort hipMemcpy failed!"); timer().startGpuTimer(); for (int k = 0; k <= numOfBits - 1; k++) { kernGen_b_e_array << <gridDim, blockDim >> > (n, k, b_array, e_array, dev_data); hipMemcpy(host_f_array, e_array, sizeof(int) * n, hipMemcpyDeviceToHost); int totalFalses = host_f_array[n - 1]; // Get Exclusive scan result as a whole StreamCompaction::Efficient::scan(n, host_f_array, host_f_array); //StreamCompaction::Efficient::scanDynamicShared(n, host_f_array, host_f_array); totalFalses += host_f_array[n - 1]; hipMemcpy(f_array, host_f_array, sizeof(int) * n, hipMemcpyHostToDevice); // Since here we run exclusive scan as a whole, // and we don't want each tile to run StreamCompaction::Efficient::scan individually. // value in d_array here is actually index value in the whole data array, not just index in that tile // so, there is NO need to merge here kern_Gen_d_array_and_scatter<blockSize> << <gridDim, blockDim >> > (n, totalFalses, b_array, f_array, dev_data); } timer().endGpuTimer(); hipMemcpy(odata, dev_data, sizeof(int) * n, hipMemcpyDeviceToHost); checkCUDAError("hipMemcpy failed!"); hipFree(dev_data); hipFree(b_array); hipFree(e_array); hipFree(f_array); delete[] host_f_array; } }
c68b1a00c426bb896b8249eeaa4f9eaed15c0189.cu
#include <cuda.h> #include <cuda_runtime.h> #include "common.h" #include "efficient.h" #include "RadixSort.h" namespace RadixSort { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } __global__ void kernGen_b_e_array(int N, int idxBit, int* b_array, int* e_array, const int *dev_data) { // TODO int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } int temp_result = (dev_data[index] >> idxBit) & 1; b_array[index] = temp_result; e_array[index] = 1 - temp_result; } template<int BLOCK_SIZE> __global__ void kern_Gen_d_array_and_scatter(int N, const int totalFalses, const int* b_array, const int* f_array, int* dev_data) { //Allocate appropriate shared memory __shared__ int tile[BLOCK_SIZE]; int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } tile[threadIdx.x] = dev_data[index]; __syncthreads(); int t_array_value = index - f_array[index] + totalFalses; int d_array_value = b_array[index] ? t_array_value : f_array[index]; dev_data[d_array_value] = tile[threadIdx.x]; } void sort(int n, int numOfBits, int *odata, const int *idata) { int* dev_data; int* b_array; int* e_array; int* f_array; int* host_f_array = new int[n]; dim3 blockDim(blockSize); dim3 gridDim((n + blockSize - 1) / blockSize); cudaMalloc((void**)&dev_data, n * sizeof(int)); checkCUDAError("cudaMalloc dev_data failed!"); cudaMalloc((void**)&b_array, n * sizeof(int)); checkCUDAError("cudaMalloc b_array failed!"); cudaMalloc((void**)&e_array, n * sizeof(int)); checkCUDAError("cudaMalloc e_array failed!"); cudaMalloc((void**)&f_array, n * sizeof(int)); checkCUDAError("cudaMalloc f_array failed!"); cudaDeviceSynchronize(); cudaMemcpy(dev_data, idata, sizeof(int) * n, cudaMemcpyHostToDevice); checkCUDAError("RadixSort cudaMemcpy failed!"); timer().startGpuTimer(); for (int k = 0; k <= numOfBits - 1; k++) { kernGen_b_e_array << <gridDim, blockDim >> > (n, k, b_array, e_array, dev_data); cudaMemcpy(host_f_array, e_array, sizeof(int) * n, cudaMemcpyDeviceToHost); int totalFalses = host_f_array[n - 1]; // Get Exclusive scan result as a whole StreamCompaction::Efficient::scan(n, host_f_array, host_f_array); //StreamCompaction::Efficient::scanDynamicShared(n, host_f_array, host_f_array); totalFalses += host_f_array[n - 1]; cudaMemcpy(f_array, host_f_array, sizeof(int) * n, cudaMemcpyHostToDevice); // Since here we run exclusive scan as a whole, // and we don't want each tile to run StreamCompaction::Efficient::scan individually. // value in d_array here is actually index value in the whole data array, not just index in that tile // so, there is NO need to merge here kern_Gen_d_array_and_scatter<blockSize> << <gridDim, blockDim >> > (n, totalFalses, b_array, f_array, dev_data); } timer().endGpuTimer(); cudaMemcpy(odata, dev_data, sizeof(int) * n, cudaMemcpyDeviceToHost); checkCUDAError("cudaMemcpy failed!"); cudaFree(dev_data); cudaFree(b_array); cudaFree(e_array); cudaFree(f_array); delete[] host_f_array; } }
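/* Editor's note: host-side reference for one radix pass above (illustrative,
   not part of the original file). The GPU path builds b (bit set), e = 1 - b,
   f = exclusive_scan(e), totalFalses = e[n-1] + f[n-1], and scatters each key
   to d = b ? (i - f[i] + totalFalses) : f[i]. That is a stable partition by
   the current bit, which the sequential sketch below reproduces with two
   running counters (the name radix_split_reference is hypothetical): */
static void radix_split_reference(int n, int bit, const int* in, int* out)
{
    // totalFalses = number of keys whose current bit is 0
    int totalFalses = 0;
    for (int i = 0; i < n; ++i) totalFalses += !((in[i] >> bit) & 1);

    int nextFalse = 0;            // equals f[i] for the i-th 0-bit key
    int nextTrue  = totalFalses;  // equals i - f[i] + totalFalses for 1-bit keys
    for (int i = 0; i < n; ++i) {
        if ((in[i] >> bit) & 1) out[nextTrue++]  = in[i];
        else                    out[nextFalse++] = in[i];
    }
}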
75ccffb5a44a43f3c5dbd33064d90c6ca35f5d8d.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "kernel_cudaCompute_AtP.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *d_A = NULL; hipMalloc(&d_A, XSIZE*YSIZE); double *d_P = NULL; hipMalloc(&d_P, XSIZE*YSIZE); double *d_AtP = NULL; hipMalloc(&d_AtP, XSIZE*YSIZE); int rows = XSIZE; int columns = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( kernel_cudaCompute_AtP), dim3(gridBlock),dim3(threadBlock), 0, 0, d_A,d_P,d_AtP,rows,columns); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( kernel_cudaCompute_AtP), dim3(gridBlock),dim3(threadBlock), 0, 0, d_A,d_P,d_AtP,rows,columns); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( kernel_cudaCompute_AtP), dim3(gridBlock),dim3(threadBlock), 0, 0, d_A,d_P,d_AtP,rows,columns); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
75ccffb5a44a43f3c5dbd33064d90c6ca35f5d8d.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "kernel_cudaCompute_AtP.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *d_A = NULL; cudaMalloc(&d_A, XSIZE*YSIZE); double *d_P = NULL; cudaMalloc(&d_P, XSIZE*YSIZE); double *d_AtP = NULL; cudaMalloc(&d_AtP, XSIZE*YSIZE); int rows = XSIZE; int columns = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); kernel_cudaCompute_AtP<<<gridBlock,threadBlock>>>(d_A,d_P,d_AtP,rows,columns); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { kernel_cudaCompute_AtP<<<gridBlock,threadBlock>>>(d_A,d_P,d_AtP,rows,columns); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { kernel_cudaCompute_AtP<<<gridBlock,threadBlock>>>(d_A,d_P,d_AtP,rows,columns); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
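/* Editor's note: equivalent formulation, not a change to the generated file
   above. The two while-loops that grow iXSIZE/iYSIZE until they divide evenly
   by BLOCKX/BLOCKY are just computing a rounded-up grid; the usual ceiling
   division gives the same launch shape without the loops: */
// dim3 gridBlock((XSIZE + BLOCKX - 1) / BLOCKX, (YSIZE + BLOCKY - 1) / BLOCKY);
// dim3 threadBlock(BLOCKX, BLOCKY);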
67d408ac4f92d3ba63fbbdae53bf6bae17d90cb8.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2011-2015 NVIDIA Corporation. All rights reserved * * Sample app to demonstrate use of CUPTI library to obtain metric values * using callbacks for CUDA runtime APIs * */ #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> // #include <cupti.h> #include <math_constants.h> // #include "../../lcutil.h" #include <hip/hip_runtime_api.h> #define CUDA_SAFE_CALL( call) { \ hipError_t err = call; \ if( hipSuccess != err) { \ fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \ __FILE__, __LINE__, hipGetErrorString( err) ); \ exit(EXIT_FAILURE); \ } } #define DRIVER_API_CALL(apiFuncCall) \ do { \ hipError_t _status = apiFuncCall; \ if (_status != hipSuccess) { \ fprintf(stderr, "%s:%d: error: function %s failed with error %d.\n", \ __FILE__, __LINE__, #apiFuncCall, _status); \ exit(-1); \ } \ } while (0) #define RUNTIME_API_CALL(apiFuncCall) \ do { \ hipError_t _status = apiFuncCall; \ if (_status != hipSuccess) { \ fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n", \ __FILE__, __LINE__, #apiFuncCall, hipGetErrorString(_status));\ exit(-1); \ } \ } while (0) #define ALIGN_SIZE (8) #define ALIGN_BUFFER(buffer, align) \ (((uintptr_t) (buffer) & ((align)-1)) ? ((buffer) + (align) - ((uintptr_t) (buffer) & ((align)-1))) : (buffer)) // #define COMP_ITERATIONS (512) #define THREADS (1024) #define BLOCKS (3276) #define N (10) #define REGBLOCK_SIZE (4) // #define UNROLL_ITERATIONS (32) #define deviceNum (0) // #define OFFSET #define INNER_REPS 8192 #define UNROLLS 32 // __constant__ __device__ int off [16] = {0,4,8,12,9,13,1,5,2,6,10,14,11,15,3,7}; //512 threads // __constant__ __device__ int off [16] = {0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3}; //512 threads // __constant__ __device__ int off [16] = {0,2,4,6,8,10,12,14,11,9,15,13,3,1,7,5}; //256 threads template <class T> __global__ void benchmark (T* cdin, T* cdout){ // const int total = THREADS*BLOCKS+THREADS; const int ite = blockIdx.x * THREADS + threadIdx.x; T r0; // printf("%d - %d\n", blockIdx.x,off[blockIdx.x]); // T r1,r2,r3; // r0=cdin[ite]; for (int k=0; k<N;k++){ #pragma unroll 8192 for(int j=0; j<INNER_REPS; j+=UNROLLS){ r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; } } cdout[ite]=r0; } double median(int n, double x[][4],int col) { double temp; int i, j; // the following two loops sort the array x in ascending order for(i=0; i<n-1; i++) { for(j=i+1; j<n; j++) { if(x[j][col] < x[i][col]) { // swap elements temp = x[i][col]; x[i][col] = x[j][col]; x[j][col] = temp; } } } if(n%2==0) { // if there is an even 
number of elements, return mean of the two elements in the middle return((x[n/2][col] + x[n/2 - 1][col]) / 2.0); } else { // else return the element in the middle return x[n/2][col]; } } void initializeEvents(hipEvent_t *start, hipEvent_t *stop){ CUDA_SAFE_CALL( hipEventCreate(start) ); CUDA_SAFE_CALL( hipEventCreate(stop) ); CUDA_SAFE_CALL( hipEventRecord(*start, 0) ); } float finalizeEvents(hipEvent_t start, hipEvent_t stop){ CUDA_SAFE_CALL( hipGetLastError() ); CUDA_SAFE_CALL( hipEventRecord(stop, 0) ); CUDA_SAFE_CALL( hipEventSynchronize(stop) ); float kernel_time; CUDA_SAFE_CALL( hipEventElapsedTime(&kernel_time, start, stop) ); CUDA_SAFE_CALL( hipEventDestroy(start) ); CUDA_SAFE_CALL( hipEventDestroy(stop) ); return kernel_time; } void runbench(int type, double* kernel_time, double* bandw,double* cdin,double* cdout){ hipEvent_t start, stop; initializeEvents(&start, &stop); dim3 dimBlock(THREADS, 1, 1); dim3 dimGrid(BLOCKS, 1, 1); // if (type==0){ hipLaunchKernelGGL(( benchmark<float>), dim3(dimGrid), dim3(dimBlock) , 0, 0, (float*)cdin,(float*)cdout); // }else{ // benchmark<double><<< dimGrid, dimBlock >>>(cdin,cdout, inner_reps, unrolls); // } long long shared_access = 2*(long long)(INNER_REPS)*N*THREADS*BLOCKS; hipDeviceSynchronize(); double time = finalizeEvents(start, stop); double result; if (type==0) result = ((double)shared_access)*4/(double)time*1000./(double)(1024*1024*1024); else result = ((double)shared_access)*8/(double)time*1000./(double)(1024*1024*1024); *kernel_time = time; *bandw=result; } int main(int argc, char *argv[]){ // CUpti_SubscriberHandle subscriber; hipCtx_t context = 0; hipDevice_t device = 0; int deviceCount; char deviceName[32]; int outer_reps; // , vector_size, tile_dim; if (argc>1){ outer_reps = atoi(argv[1]); }else{ outer_reps = 1; } // cupti_eventData cuptiEvent; // RuntimeApiTrace_t trace; hipDeviceProp_t deviceProp; printf("Usage: %s [device_num] [metric_name]\n", argv[0]); hipSetDevice(deviceNum); double mean[4]; double time[outer_reps][2],value[outer_reps][4],sum_dev_median[4],sum_dev_mean[4],medianv[4],std_dev_mean[4],std_dev_median[4]; long SPresult[outer_reps],DPresult[outer_reps],timeresult[outer_reps][2]; int L2size; int counters; // StoreDeviceInfo_DRAM(stdout,&L2size); int size = THREADS*BLOCKS*sizeof(double); size_t freeCUDAMem, totalCUDAMem; hipMemGetInfo(&freeCUDAMem, &totalCUDAMem); printf("Total GPU memory %lu, free %lu\n", totalCUDAMem, freeCUDAMem); printf("Buffer size: %dMB\n", size*sizeof(double)/(1024*1024)); SPresult[0]=0; DPresult[0]=0; //Initialize Global Memory double *cdin,L2=32; double *cdout; CUDA_SAFE_CALL(hipMalloc((void**)&cdin, size)); CUDA_SAFE_CALL(hipMalloc((void**)&cdout, size)); // Copy data to device memory CUDA_SAFE_CALL(hipMemset(cdin, 1, size)); // initialize to zeros CUDA_SAFE_CALL(hipMemset(cdout, 0, size)); // initialize to zeros // Synchronize in order to wait for memory operations to finish CUDA_SAFE_CALL(hipDeviceSynchronize()); // make sure activity is enabled before any CUDA API DRIVER_API_CALL(hipGetDeviceCount(&deviceCount)); if (deviceCount == 0) { printf("There is no device supporting CUDA.\n"); return -2; } printf("CUDA Device Number: %d\n", deviceNum); DRIVER_API_CALL(hipDeviceGet(&device, deviceNum)); CUDA_SAFE_CALL(hipGetDeviceProperties(&deviceProp, device)); DRIVER_API_CALL(hipDeviceGetName(deviceName, 32, device)); int i; class type; uint64_t L2units; size_t sizet=sizeof(L2units); for (i=0;i<outer_reps;i++){ uint32_t all = 1; runbench(0,&time[0][0],&value[0][0],cdin,cdout); 
printf("Registered time: %f ms\n",time[0][0]); } CUDA_SAFE_CALL( hipDeviceReset()); return 0; }
67d408ac4f92d3ba63fbbdae53bf6bae17d90cb8.cu
/* * Copyright 2011-2015 NVIDIA Corporation. All rights reserved * * Sample app to demonstrate use of CUPTI library to obtain metric values * using callbacks for CUDA runtime APIs * */ #include <stdio.h> #include <stdlib.h> #include <cuda.h> // #include <cupti.h> #include <math_constants.h> // #include "../../lcutil.h" #include <cuda_profiler_api.h> #define CUDA_SAFE_CALL( call) { \ cudaError err = call; \ if( cudaSuccess != err) { \ fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \ __FILE__, __LINE__, cudaGetErrorString( err) ); \ exit(EXIT_FAILURE); \ } } #define DRIVER_API_CALL(apiFuncCall) \ do { \ CUresult _status = apiFuncCall; \ if (_status != CUDA_SUCCESS) { \ fprintf(stderr, "%s:%d: error: function %s failed with error %d.\n", \ __FILE__, __LINE__, #apiFuncCall, _status); \ exit(-1); \ } \ } while (0) #define RUNTIME_API_CALL(apiFuncCall) \ do { \ cudaError_t _status = apiFuncCall; \ if (_status != cudaSuccess) { \ fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n", \ __FILE__, __LINE__, #apiFuncCall, cudaGetErrorString(_status));\ exit(-1); \ } \ } while (0) #define ALIGN_SIZE (8) #define ALIGN_BUFFER(buffer, align) \ (((uintptr_t) (buffer) & ((align)-1)) ? ((buffer) + (align) - ((uintptr_t) (buffer) & ((align)-1))) : (buffer)) // #define COMP_ITERATIONS (512) #define THREADS (1024) #define BLOCKS (3276) #define N (10) #define REGBLOCK_SIZE (4) // #define UNROLL_ITERATIONS (32) #define deviceNum (0) // #define OFFSET #define INNER_REPS 8192 #define UNROLLS 32 // __constant__ __device__ int off [16] = {0,4,8,12,9,13,1,5,2,6,10,14,11,15,3,7}; //512 threads // __constant__ __device__ int off [16] = {0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3}; //512 threads // __constant__ __device__ int off [16] = {0,2,4,6,8,10,12,14,11,9,15,13,3,1,7,5}; //256 threads template <class T> __global__ void benchmark (T* cdin, T* cdout){ // const int total = THREADS*BLOCKS+THREADS; const int ite = blockIdx.x * THREADS + threadIdx.x; T r0; // printf("%d - %d\n", blockIdx.x,off[blockIdx.x]); // T r1,r2,r3; // r0=cdin[ite]; for (int k=0; k<N;k++){ #pragma unroll 8192 for(int j=0; j<INNER_REPS; j+=UNROLLS){ r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; r0 = cdin[ite]; cdout[ite]=r0; } } cdout[ite]=r0; } double median(int n, double x[][4],int col) { double temp; int i, j; // the following two loops sort the array x in ascending order for(i=0; i<n-1; i++) { for(j=i+1; j<n; j++) { if(x[j][col] < x[i][col]) { // swap elements temp = x[i][col]; x[i][col] = x[j][col]; x[j][col] = temp; } } } if(n%2==0) { // if there is an even number of elements, return mean of the two elements in the middle 
return((x[n/2][col] + x[n/2 - 1][col]) / 2.0); } else { // else return the element in the middle return x[n/2][col]; } } void initializeEvents(cudaEvent_t *start, cudaEvent_t *stop){ CUDA_SAFE_CALL( cudaEventCreate(start) ); CUDA_SAFE_CALL( cudaEventCreate(stop) ); CUDA_SAFE_CALL( cudaEventRecord(*start, 0) ); } float finalizeEvents(cudaEvent_t start, cudaEvent_t stop){ CUDA_SAFE_CALL( cudaGetLastError() ); CUDA_SAFE_CALL( cudaEventRecord(stop, 0) ); CUDA_SAFE_CALL( cudaEventSynchronize(stop) ); float kernel_time; CUDA_SAFE_CALL( cudaEventElapsedTime(&kernel_time, start, stop) ); CUDA_SAFE_CALL( cudaEventDestroy(start) ); CUDA_SAFE_CALL( cudaEventDestroy(stop) ); return kernel_time; } void runbench(int type, double* kernel_time, double* bandw,double* cdin,double* cdout){ cudaEvent_t start, stop; initializeEvents(&start, &stop); dim3 dimBlock(THREADS, 1, 1); dim3 dimGrid(BLOCKS, 1, 1); // if (type==0){ benchmark<float><<< dimGrid, dimBlock >>>((float*)cdin,(float*)cdout); // }else{ // benchmark<double><<< dimGrid, dimBlock >>>(cdin,cdout, inner_reps, unrolls); // } long long shared_access = 2*(long long)(INNER_REPS)*N*THREADS*BLOCKS; cudaDeviceSynchronize(); double time = finalizeEvents(start, stop); double result; if (type==0) result = ((double)shared_access)*4/(double)time*1000./(double)(1024*1024*1024); else result = ((double)shared_access)*8/(double)time*1000./(double)(1024*1024*1024); *kernel_time = time; *bandw=result; } int main(int argc, char *argv[]){ // CUpti_SubscriberHandle subscriber; CUcontext context = 0; CUdevice device = 0; int deviceCount; char deviceName[32]; int outer_reps; // , vector_size, tile_dim; if (argc>1){ outer_reps = atoi(argv[1]); }else{ outer_reps = 1; } // cupti_eventData cuptiEvent; // RuntimeApiTrace_t trace; cudaDeviceProp deviceProp; printf("Usage: %s [device_num] [metric_name]\n", argv[0]); cudaSetDevice(deviceNum); double mean[4]; double time[outer_reps][2],value[outer_reps][4],sum_dev_median[4],sum_dev_mean[4],medianv[4],std_dev_mean[4],std_dev_median[4]; long SPresult[outer_reps],DPresult[outer_reps],timeresult[outer_reps][2]; int L2size; int counters; // StoreDeviceInfo_DRAM(stdout,&L2size); int size = THREADS*BLOCKS*sizeof(double); size_t freeCUDAMem, totalCUDAMem; cudaMemGetInfo(&freeCUDAMem, &totalCUDAMem); printf("Total GPU memory %lu, free %lu\n", totalCUDAMem, freeCUDAMem); printf("Buffer size: %dMB\n", size*sizeof(double)/(1024*1024)); SPresult[0]=0; DPresult[0]=0; //Initialize Global Memory double *cdin,L2=32; double *cdout; CUDA_SAFE_CALL(cudaMalloc((void**)&cdin, size)); CUDA_SAFE_CALL(cudaMalloc((void**)&cdout, size)); // Copy data to device memory CUDA_SAFE_CALL(cudaMemset(cdin, 1, size)); // initialize to zeros CUDA_SAFE_CALL(cudaMemset(cdout, 0, size)); // initialize to zeros // Synchronize in order to wait for memory operations to finish CUDA_SAFE_CALL(cudaThreadSynchronize()); // make sure activity is enabled before any CUDA API DRIVER_API_CALL(cuDeviceGetCount(&deviceCount)); if (deviceCount == 0) { printf("There is no device supporting CUDA.\n"); return -2; } printf("CUDA Device Number: %d\n", deviceNum); DRIVER_API_CALL(cuDeviceGet(&device, deviceNum)); CUDA_SAFE_CALL(cudaGetDeviceProperties(&deviceProp, device)); DRIVER_API_CALL(cuDeviceGetName(deviceName, 32, device)); int i; class type; uint64_t L2units; size_t sizet=sizeof(L2units); for (i=0;i<outer_reps;i++){ uint32_t all = 1; runbench(0,&time[0][0],&value[0][0],cdin,cdout); printf("Registered time: %f ms\n",time[0][0]); } CUDA_SAFE_CALL( cudaDeviceReset()); return 0; }
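The pair above illustrates the main mechanical changes hipify makes to this benchmark: the triple-chevron launch benchmark<float><<< dimGrid, dimBlock >>>(...) becomes hipLaunchKernelGGL(( benchmark<float>), dim3(dimGrid), dim3(dimBlock), 0, 0, ...), the cudaEvent_t timing calls map one-for-one to hipEvent_t equivalents, the deprecated cudaThreadSynchronize() is replaced by hipDeviceSynchronize(), and the driver-API device queries (cuDeviceGetCount, cuDeviceGet, cuDeviceGetName) are mapped onto HIP calls. The following is a minimal CUDA sketch of the same event-timed launch pattern; it is not taken from the file, and the copy_kernel name is illustrative.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void copy_kernel(const float *in, float *out, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = in[i];                       // one global load and one global store per thread
}

int main() {
    const int n = 1 << 20;
    float *in, *out;
    cudaMalloc((void **)&in,  n * sizeof(float));
    cudaMalloc((void **)&out, n * sizeof(float));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, 0);
    copy_kernel<<<(n + 255) / 256, 256>>>(in, out, n);   // hipify would emit hipLaunchKernelGGL(copy_kernel, ...)
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    // Effective bandwidth: bytes read plus bytes written, divided by elapsed time.
    double gbps = 2.0 * n * sizeof(float) / (ms / 1000.0) / 1e9;
    printf("time %.3f ms, bandwidth %.2f GB/s\n", ms, gbps);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(in);
    cudaFree(out);
    return 0;
}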
7b52832e4a2fe6c730300beac5d375d46e433cfd.hip
// !!! This is a file automatically generated by hipify!!! #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/BCECriterion.cu" #else void THNN_(BCECriterion_updateOutput)( THCState *state, THCTensor *input, THCTensor *target, THCTensor *output, int64_t reduction, THCTensor *weights) { THCUNN_check_nElement(state, input, target); THCUNN_check_nElement(state, input, weights); THCUNN_assertSameGPU(state, 3, input, target, weights); if (reduction == Reduction::None) { THCTensor_(resizeAs)(state, output, input); THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, input, target, output, bce_updateOutput_no_reduce_functor<scalar_t, accreal>()); if (weights) { THCTensor_(cmul)(state, output, output, weights); } return; } THCTensor_(resize1d)(state, output, 1); ptrdiff_t size = THCTensor_(nElement)(state, input); input = THCTensor_(newContiguous)(state, input); target = THCTensor_(newContiguous)(state, target); THCThrustAllocator thrustAlloc(state); thrust::device_ptr<scalar_t> input_data(THCTensor_(data)(state, input)); thrust::device_ptr<scalar_t> target_data(THCTensor_(data)(state, target)); accreal sum; if (weights) { weights = THCTensor_(newContiguous)(state, weights); thrust::device_ptr<scalar_t> weights_data(THCTensor_(data)(state, weights)); sum = thrust::transform_reduce( thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), thrust::make_zip_iterator(thrust::make_tuple(input_data, target_data, weights_data)), thrust::make_zip_iterator(thrust::make_tuple(input_data+size, target_data+size, weights_data+size)), bce_functor_weights<scalar_t, accreal>(), (accreal) 0, thrust::plus<accreal>() ); THCTensor_(free)(state, weights); } else { sum = thrust::transform_reduce( thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), thrust::make_zip_iterator(thrust::make_tuple(input_data, target_data)), thrust::make_zip_iterator(thrust::make_tuple(input_data+size, target_data+size)), bce_functor<scalar_t, accreal>(), (accreal) 0, thrust::plus<accreal>() ); } if (reduction == Reduction::Mean) sum /= size; THCTensor_(free)(state, input); THCTensor_(free)(state, target); THCTensor_(set1d)(state, output, 0, ScalarConvert<accreal, scalar_t>::to(sum)); } void THNN_(BCECriterion_updateGradInput)( THCState *state, THCTensor *input, THCTensor *target, THCTensor *gradOutput, THCTensor *gradInput, int64_t reduction, THCTensor *weights) { THCUNN_check_nElement(state, input, target); THCUNN_check_nElement(state, input, weights); THCUNN_assertSameGPU(state, 4, input, target, gradInput, weights); THCTensor_(resizeAs)(state, gradInput, input); if (reduction == Reduction::None) { THCUNN_check_nElement(state, gradOutput, input); THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, input, target, gradInput, bce_updateGradInput_no_reduce_functor<scalar_t, accreal>()); THCTensor_(cmul)(state, gradInput, gradInput, gradOutput); if (weights) { THCTensor_(cmul)(state, gradInput, gradInput, weights); } return; } THCUNN_check_dim_size(state, gradOutput, 1, 0, 1); ptrdiff_t size = THCTensor_(nElement)(state, input); scalar_t norm = ScalarConvert<accreal, scalar_t>::to((reduction == Reduction::Mean ? 
accreal(1)/size : accreal(1)) * THCTensor_(get1d)(state, gradOutput, 0)); input = THCTensor_(newContiguous)(state, input); target = THCTensor_(newContiguous)(state, target); thrust::device_ptr<scalar_t> input_data(THCTensor_(data)(state, input)); thrust::device_ptr<scalar_t> target_data(THCTensor_(data)(state, target)); thrust::device_ptr<scalar_t> gradInput_data(THCTensor_(data)(state, gradInput)); if (weights) { weights = THCTensor_(newContiguous)(state, weights); thrust::device_ptr<scalar_t> weights_data(THCTensor_(data)(state, weights)); thrust::transform( thrust::make_zip_iterator(thrust::make_tuple(input_data, target_data, weights_data)), thrust::make_zip_iterator(thrust::make_tuple(input_data+size, target_data+size, weights_data+size)), gradInput_data, bce_updateGradInput_functor_weights<scalar_t, accreal>(norm) ); THCTensor_(free)(state, weights); } else { thrust::transform( thrust::make_zip_iterator(thrust::make_tuple(input_data, target_data)), thrust::make_zip_iterator(thrust::make_tuple(input_data+size, target_data+size)), gradInput_data, bce_updateGradInput_functor<scalar_t, accreal>(norm) ); } THCTensor_(free)(state, input); THCTensor_(free)(state, target); } #endif
7b52832e4a2fe6c730300beac5d375d46e433cfd.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/BCECriterion.cu" #else void THNN_(BCECriterion_updateOutput)( THCState *state, THCTensor *input, THCTensor *target, THCTensor *output, int64_t reduction, THCTensor *weights) { THCUNN_check_nElement(state, input, target); THCUNN_check_nElement(state, input, weights); THCUNN_assertSameGPU(state, 3, input, target, weights); if (reduction == Reduction::None) { THCTensor_(resizeAs)(state, output, input); THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, input, target, output, bce_updateOutput_no_reduce_functor<scalar_t, accreal>()); if (weights) { THCTensor_(cmul)(state, output, output, weights); } return; } THCTensor_(resize1d)(state, output, 1); ptrdiff_t size = THCTensor_(nElement)(state, input); input = THCTensor_(newContiguous)(state, input); target = THCTensor_(newContiguous)(state, target); THCThrustAllocator thrustAlloc(state); thrust::device_ptr<scalar_t> input_data(THCTensor_(data)(state, input)); thrust::device_ptr<scalar_t> target_data(THCTensor_(data)(state, target)); accreal sum; if (weights) { weights = THCTensor_(newContiguous)(state, weights); thrust::device_ptr<scalar_t> weights_data(THCTensor_(data)(state, weights)); sum = thrust::transform_reduce( thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), thrust::make_zip_iterator(thrust::make_tuple(input_data, target_data, weights_data)), thrust::make_zip_iterator(thrust::make_tuple(input_data+size, target_data+size, weights_data+size)), bce_functor_weights<scalar_t, accreal>(), (accreal) 0, thrust::plus<accreal>() ); THCTensor_(free)(state, weights); } else { sum = thrust::transform_reduce( thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), thrust::make_zip_iterator(thrust::make_tuple(input_data, target_data)), thrust::make_zip_iterator(thrust::make_tuple(input_data+size, target_data+size)), bce_functor<scalar_t, accreal>(), (accreal) 0, thrust::plus<accreal>() ); } if (reduction == Reduction::Mean) sum /= size; THCTensor_(free)(state, input); THCTensor_(free)(state, target); THCTensor_(set1d)(state, output, 0, ScalarConvert<accreal, scalar_t>::to(sum)); } void THNN_(BCECriterion_updateGradInput)( THCState *state, THCTensor *input, THCTensor *target, THCTensor *gradOutput, THCTensor *gradInput, int64_t reduction, THCTensor *weights) { THCUNN_check_nElement(state, input, target); THCUNN_check_nElement(state, input, weights); THCUNN_assertSameGPU(state, 4, input, target, gradInput, weights); THCTensor_(resizeAs)(state, gradInput, input); if (reduction == Reduction::None) { THCUNN_check_nElement(state, gradOutput, input); THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, input, target, gradInput, bce_updateGradInput_no_reduce_functor<scalar_t, accreal>()); THCTensor_(cmul)(state, gradInput, gradInput, gradOutput); if (weights) { THCTensor_(cmul)(state, gradInput, gradInput, weights); } return; } THCUNN_check_dim_size(state, gradOutput, 1, 0, 1); ptrdiff_t size = THCTensor_(nElement)(state, input); scalar_t norm = ScalarConvert<accreal, scalar_t>::to((reduction == Reduction::Mean ? 
accreal(1)/size : accreal(1)) * THCTensor_(get1d)(state, gradOutput, 0)); input = THCTensor_(newContiguous)(state, input); target = THCTensor_(newContiguous)(state, target); thrust::device_ptr<scalar_t> input_data(THCTensor_(data)(state, input)); thrust::device_ptr<scalar_t> target_data(THCTensor_(data)(state, target)); thrust::device_ptr<scalar_t> gradInput_data(THCTensor_(data)(state, gradInput)); if (weights) { weights = THCTensor_(newContiguous)(state, weights); thrust::device_ptr<scalar_t> weights_data(THCTensor_(data)(state, weights)); thrust::transform( thrust::make_zip_iterator(thrust::make_tuple(input_data, target_data, weights_data)), thrust::make_zip_iterator(thrust::make_tuple(input_data+size, target_data+size, weights_data+size)), gradInput_data, bce_updateGradInput_functor_weights<scalar_t, accreal>(norm) ); THCTensor_(free)(state, weights); } else { thrust::transform( thrust::make_zip_iterator(thrust::make_tuple(input_data, target_data)), thrust::make_zip_iterator(thrust::make_tuple(input_data+size, target_data+size)), gradInput_data, bce_updateGradInput_functor<scalar_t, accreal>(norm) ); } THCTensor_(free)(state, input); THCTensor_(free)(state, target); } #endif
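In the BCECriterion pair above the only hipify change is the Thrust execution policy, thrust::cuda::par(thrustAlloc).on(stream) becoming thrust::hip::par(thrustAlloc).on(stream); the zip-iterator transform_reduce structure is identical on both sides. The sketch below shows that reduction pattern in a standalone form: the bce_term functor is illustrative rather than the THC functor, and the default execution policy is used instead of the custom allocator and stream.

#include <cmath>
#include <cstdio>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/transform_reduce.h>
#include <thrust/tuple.h>

// Per-element binary cross-entropy term, read through a zip iterator of (input, target).
struct bce_term {
    __host__ __device__
    float operator()(const thrust::tuple<float, float> &t) const {
        float x = thrust::get<0>(t);   // prediction in (0, 1)
        float y = thrust::get<1>(t);   // target in {0, 1}
        return -(y * logf(x) + (1.0f - y) * logf(1.0f - x));
    }
};

int main() {
    thrust::device_vector<float> input(4, 0.25f);
    thrust::device_vector<float> target(4, 1.0f);

    float sum = thrust::transform_reduce(
        thrust::make_zip_iterator(thrust::make_tuple(input.begin(), target.begin())),
        thrust::make_zip_iterator(thrust::make_tuple(input.end(),   target.end())),
        bce_term(), 0.0f, thrust::plus<float>());

    // Reduction::Mean divides the summed loss by the element count.
    printf("mean BCE = %f\n", sum / (float)input.size());
    return 0;
}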
02adc57ea4fc3ade94f9a7ad6e0e91279d6476ea.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 2017 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #include "rocblas.h" #include "../debug.h" typedef float floatType_t; /* macro for index calculations */ #define INDX( row, col, ld ) ( ( (col) * (ld) ) + (row) ) /* matrix size and thread dimensions */ #define SIZE 1024 #define THREADS_PER_BLOCK_X 16 #define THREADS_PER_BLOCK_Y 16 /* naive GPU kernel where each element of C is computed by a single thread */ __global__ void GPU_naive( const int m, floatType_t const * const a, floatType_t const * const b, floatType_t * const c ) { /* determine my threads's row and col indices in the global C matrix */ const int myrow = blockDim.x * blockIdx.x + threadIdx.x; const int mycol = blockDim.y * blockIdx.y + threadIdx.y; /* if my row and col are in the C matrix, then calculate that value of C */ if( myrow < m && mycol < m ) { register floatType_t temp = 0.0; for( int k = 0; k < m; k++ ) temp += a[INDX( myrow, k, m )] * b[INDX( k, mycol, m )]; c[INDX( myrow, mycol, m )] = temp; } /* end if */ return; } /* end GPU_naive */ int main( int argc, char *argv[] ) { /* get GPU device number and name */ int dev; hipDeviceProp_t deviceProp; checkCUDA( hipGetDevice( &dev ) ); checkCUDA( hipGetDeviceProperties( &deviceProp, dev ) ); printf("Using GPU %d: %s\n", dev, deviceProp.name ); const int size = SIZE; fprintf(stdout, "Matrix size is %d\n",size); floatType_t *h_a, *h_b, *h_c, *h_c1; floatType_t *d_a, *d_b, *d_c; size_t numbytes = (size_t ) size * (size_t ) size * sizeof( floatType_t ); h_a = (floatType_t *) malloc( numbytes ); if( h_a == NULL ) { fprintf(stderr,"Error in host malloc\n"); return 911; } h_b = (floatType_t *) malloc( numbytes ); if( h_b == NULL ) { fprintf(stderr,"Error in host malloc\n"); return 911; } h_c = (floatType_t *) malloc( numbytes ); if( h_c == NULL ) { fprintf(stderr,"Error in host malloc\n"); return 911; } h_c1 = (floatType_t *) malloc( numbytes ); if( h_c1 == NULL ) { fprintf(stderr,"Error in host malloc\n"); return 911; } /* zero out the host memory for C matrices */ memset( h_c, 0, numbytes ); memset( h_c1, 0, numbytes ); fprintf( stdout, "Total memory required is %lf MB\n", 3.0 * (double) numbytes / 1000000.0 ); /* initialize the A and B matrices */ for( int i = 0; i < size * size; i++ ) { h_a[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 ); h_b[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 ); } /* allocate a, b, c in gpu memory */ checkCUDA( hipMalloc( (void **)&d_a, numbytes ) ); checkCUDA( hipMalloc( (void **)&d_b, numbytes ) ); checkCUDA( hipMalloc( (void **)&d_c, numbytes ) ); /* copy a and b to device */ checkCUDA( hipMemcpy( d_a, h_a, numbytes, hipMemcpyHostToDevice ) ); checkCUDA( hipMemcpy( d_b, h_b, numbytes, hipMemcpyHostToDevice ) ); hipblasHandle_t handle; checkCUBLAS( hipblasCreate( &handle ) ); floatType_t alpha = 1.0; floatType_t beta = 0.0; /* start timers */ hipEvent_t start, stop; checkCUDA( hipEventCreate( &start ) ); 
checkCUDA( hipEventCreate( &stop ) ); checkCUDA( hipEventRecord( start, 0 ) ); /* call CUBLAS dgemm */ if( sizeof( floatType_t ) == 4 ) { checkCUBLAS( hipblasSgemm( handle, HIPBLAS_OP_N, HIPBLAS_OP_N, size, size, size, (float *)&alpha, (float *)d_a, size, (float *)d_b, size, (float *)&beta, (float *)d_c, size ) ); } /* end if */ else { checkCUBLAS( hipblasDgemm( handle, HIPBLAS_OP_N, HIPBLAS_OP_N, size, size, size, (double *)&alpha, (double *)d_a, size, (double *)d_b, size, (double *)&beta, (double *)d_c, size ) ); } /* end else */ /* stop timers */ checkCUDA( hipEventRecord( stop, 0 ) ); checkCUDA( hipEventSynchronize( stop ) ); float elapsedTime; checkCUDA( hipEventElapsedTime( &elapsedTime, start, stop ) ); /* print GPU CUBLAS timing information */ fprintf(stdout, "Total time GPU CUBLAS is %f sec\n", elapsedTime / 1000.0f ); fprintf(stdout, "Performance is %f GFlop/s\n", 2.0 * (double) size * (double) size * (double) size / ( (double) elapsedTime / 1000.0 ) * 1.e-9 ); /* copy C from device to host for error checking */ checkCUDA( hipMemcpy( h_c, d_c, numbytes, hipMemcpyDeviceToHost ) ); /* reset C on device to zero */ checkCUDA( hipMemset( d_c, 0, numbytes ) ); /* setup grid and block sizes */ dim3 threads( THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y, 1 ); dim3 blocks( size / THREADS_PER_BLOCK_X + 1, size / THREADS_PER_BLOCK_Y + 1, 1 ); /* start timers */ checkCUDA( hipEventRecord( start, 0 ) ); /* call GPU_naive */ hipLaunchKernelGGL(( GPU_naive), dim3(blocks), dim3(threads) , 0, 0, size, d_a, d_b, d_c ); checkKERNEL() /* stop timers */ checkCUDA( hipEventRecord( stop, 0 ) ); checkCUDA( hipEventSynchronize( stop ) ); checkCUDA( hipEventElapsedTime( &elapsedTime, start, stop ) ); /* print data for GPU naive */ fprintf(stdout, "Total time GPU NAIVE is %f sec\n", elapsedTime / 1000.0f ); fprintf(stdout, "Performance is %f GFlop/s\n", 2.0 * (double) size * (double) size * (double) size / ( (double) elapsedTime / 1000.0 ) * 1.e-9 ); /* copy C back to host */ checkCUDA( hipMemcpy( h_c1, d_c, numbytes, hipMemcpyDeviceToHost ) ); checkCUBLAS( hipblasDestroy( handle ) ); checkCUDA( hipEventDestroy( start ) ); checkCUDA( hipEventDestroy( stop ) ); /* check CUBLAS versus GPU NAIVE numerical results */ double temp = 0.0; for( int i = 0; i < size * size; i++ ) { temp = max( temp, abs( (double)h_c[i] - (double)h_c1[i] )/ abs((double)h_c[i]) ); } /* end for */ printf("Maximum error is %e percent \n",temp*100.0); if( temp > 0.001 ) printf("FAIL\n"); else printf("PASS\n"); /* cleanup */ checkCUDA( hipFree( d_a ) ); checkCUDA( hipFree( d_b ) ); checkCUDA( hipFree( d_c ) ); free( h_a ); free( h_b ); free( h_c ); free( h_c1 ); checkCUDA( hipDeviceReset() ); return 0; }
02adc57ea4fc3ade94f9a7ad6e0e91279d6476ea.cu
/* * Copyright 2017 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #include "cublas_v2.h" #include "../debug.h" typedef float floatType_t; /* macro for index calculations */ #define INDX( row, col, ld ) ( ( (col) * (ld) ) + (row) ) /* matrix size and thread dimensions */ #define SIZE 1024 #define THREADS_PER_BLOCK_X 16 #define THREADS_PER_BLOCK_Y 16 /* naive GPU kernel where each element of C is computed by a single thread */ __global__ void GPU_naive( const int m, floatType_t const * const a, floatType_t const * const b, floatType_t * const c ) { /* determine my threads's row and col indices in the global C matrix */ const int myrow = blockDim.x * blockIdx.x + threadIdx.x; const int mycol = blockDim.y * blockIdx.y + threadIdx.y; /* if my row and col are in the C matrix, then calculate that value of C */ if( myrow < m && mycol < m ) { register floatType_t temp = 0.0; for( int k = 0; k < m; k++ ) temp += a[INDX( myrow, k, m )] * b[INDX( k, mycol, m )]; c[INDX( myrow, mycol, m )] = temp; } /* end if */ return; } /* end GPU_naive */ int main( int argc, char *argv[] ) { /* get GPU device number and name */ int dev; cudaDeviceProp deviceProp; checkCUDA( cudaGetDevice( &dev ) ); checkCUDA( cudaGetDeviceProperties( &deviceProp, dev ) ); printf("Using GPU %d: %s\n", dev, deviceProp.name ); const int size = SIZE; fprintf(stdout, "Matrix size is %d\n",size); floatType_t *h_a, *h_b, *h_c, *h_c1; floatType_t *d_a, *d_b, *d_c; size_t numbytes = (size_t ) size * (size_t ) size * sizeof( floatType_t ); h_a = (floatType_t *) malloc( numbytes ); if( h_a == NULL ) { fprintf(stderr,"Error in host malloc\n"); return 911; } h_b = (floatType_t *) malloc( numbytes ); if( h_b == NULL ) { fprintf(stderr,"Error in host malloc\n"); return 911; } h_c = (floatType_t *) malloc( numbytes ); if( h_c == NULL ) { fprintf(stderr,"Error in host malloc\n"); return 911; } h_c1 = (floatType_t *) malloc( numbytes ); if( h_c1 == NULL ) { fprintf(stderr,"Error in host malloc\n"); return 911; } /* zero out the host memory for C matrices */ memset( h_c, 0, numbytes ); memset( h_c1, 0, numbytes ); fprintf( stdout, "Total memory required is %lf MB\n", 3.0 * (double) numbytes / 1000000.0 ); /* initialize the A and B matrices */ for( int i = 0; i < size * size; i++ ) { h_a[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 ); h_b[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 ); } /* allocate a, b, c in gpu memory */ checkCUDA( cudaMalloc( (void **)&d_a, numbytes ) ); checkCUDA( cudaMalloc( (void **)&d_b, numbytes ) ); checkCUDA( cudaMalloc( (void **)&d_c, numbytes ) ); /* copy a and b to device */ checkCUDA( cudaMemcpy( d_a, h_a, numbytes, cudaMemcpyHostToDevice ) ); checkCUDA( cudaMemcpy( d_b, h_b, numbytes, cudaMemcpyHostToDevice ) ); cublasHandle_t handle; checkCUBLAS( cublasCreate( &handle ) ); floatType_t alpha = 1.0; floatType_t beta = 0.0; /* start timers */ cudaEvent_t start, stop; checkCUDA( cudaEventCreate( &start ) ); checkCUDA( cudaEventCreate( &stop ) ); checkCUDA( cudaEventRecord( start, 0 ) ); 
/* call CUBLAS dgemm */ if( sizeof( floatType_t ) == 4 ) { checkCUBLAS( cublasSgemm( handle, CUBLAS_OP_N, CUBLAS_OP_N, size, size, size, (float *)&alpha, (float *)d_a, size, (float *)d_b, size, (float *)&beta, (float *)d_c, size ) ); } /* end if */ else { checkCUBLAS( cublasDgemm( handle, CUBLAS_OP_N, CUBLAS_OP_N, size, size, size, (double *)&alpha, (double *)d_a, size, (double *)d_b, size, (double *)&beta, (double *)d_c, size ) ); } /* end else */ /* stop timers */ checkCUDA( cudaEventRecord( stop, 0 ) ); checkCUDA( cudaEventSynchronize( stop ) ); float elapsedTime; checkCUDA( cudaEventElapsedTime( &elapsedTime, start, stop ) ); /* print GPU CUBLAS timing information */ fprintf(stdout, "Total time GPU CUBLAS is %f sec\n", elapsedTime / 1000.0f ); fprintf(stdout, "Performance is %f GFlop/s\n", 2.0 * (double) size * (double) size * (double) size / ( (double) elapsedTime / 1000.0 ) * 1.e-9 ); /* copy C from device to host for error checking */ checkCUDA( cudaMemcpy( h_c, d_c, numbytes, cudaMemcpyDeviceToHost ) ); /* reset C on device to zero */ checkCUDA( cudaMemset( d_c, 0, numbytes ) ); /* setup grid and block sizes */ dim3 threads( THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y, 1 ); dim3 blocks( size / THREADS_PER_BLOCK_X + 1, size / THREADS_PER_BLOCK_Y + 1, 1 ); /* start timers */ checkCUDA( cudaEventRecord( start, 0 ) ); /* call GPU_naive */ GPU_naive<<< blocks, threads >>> ( size, d_a, d_b, d_c ); checkKERNEL() /* stop timers */ checkCUDA( cudaEventRecord( stop, 0 ) ); checkCUDA( cudaEventSynchronize( stop ) ); checkCUDA( cudaEventElapsedTime( &elapsedTime, start, stop ) ); /* print data for GPU naive */ fprintf(stdout, "Total time GPU NAIVE is %f sec\n", elapsedTime / 1000.0f ); fprintf(stdout, "Performance is %f GFlop/s\n", 2.0 * (double) size * (double) size * (double) size / ( (double) elapsedTime / 1000.0 ) * 1.e-9 ); /* copy C back to host */ checkCUDA( cudaMemcpy( h_c1, d_c, numbytes, cudaMemcpyDeviceToHost ) ); checkCUBLAS( cublasDestroy( handle ) ); checkCUDA( cudaEventDestroy( start ) ); checkCUDA( cudaEventDestroy( stop ) ); /* check CUBLAS versus GPU NAIVE numerical results */ double temp = 0.0; for( int i = 0; i < size * size; i++ ) { temp = max( temp, abs( (double)h_c[i] - (double)h_c1[i] )/ abs((double)h_c[i]) ); } /* end for */ printf("Maximum error is %e percent \n",temp*100.0); if( temp > 0.001 ) printf("FAIL\n"); else printf("PASS\n"); /* cleanup */ checkCUDA( cudaFree( d_a ) ); checkCUDA( cudaFree( d_b ) ); checkCUDA( cudaFree( d_c ) ); free( h_a ); free( h_b ); free( h_c ); free( h_c1 ); checkCUDA( cudaDeviceReset() ); return 0; }
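The gemm pair above captures the BLAS-level mapping: the hipified file includes "rocblas.h" in place of "cublas_v2.h" but routes the calls through the hipBLAS layer, so cublasCreate, cublasSgemm, cublasDgemm, CUBLAS_OP_N and cublasDestroy become hipblasCreate, hipblasSgemm, hipblasDgemm, HIPBLAS_OP_N and hipblasDestroy, while the naive kernel and the checkCUDA/checkCUBLAS structure are untouched. Below is a minimal, self-contained cuBLAS SGEMM call in the same column-major convention; it is a sketch, not part of the sample, and omits the timing and error-check macros in favor of plain status checks.

#include <cstdio>
#include <cuda_runtime.h>
#include "cublas_v2.h"

int main() {
    const int n = 2;                          // C = alpha * A * B + beta * C, all n x n, column-major
    float hA[n * n] = {1, 2, 3, 4};           // A = [1 3; 2 4]
    float hB[n * n] = {1, 0, 0, 1};           // B = identity
    float hC[n * n] = {0, 0, 0, 0};

    float *dA, *dB, *dC;
    cudaMalloc((void **)&dA, sizeof(hA));
    cudaMalloc((void **)&dB, sizeof(hB));
    cudaMalloc((void **)&dC, sizeof(hC));
    cudaMemcpy(dA, hA, sizeof(hA), cudaMemcpyHostToDevice);
    cudaMemcpy(dB, hB, sizeof(hB), cudaMemcpyHostToDevice);
    cudaMemcpy(dC, hC, sizeof(hC), cudaMemcpyHostToDevice);

    cublasHandle_t handle;
    cublasCreate(&handle);
    const float alpha = 1.0f, beta = 0.0f;
    cublasStatus_t st = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                                    n, n, n, &alpha, dA, n, dB, n, &beta, dC, n);
    if (st != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "cublasSgemm failed: %d\n", (int)st); return 1; }

    cudaMemcpy(hC, dC, sizeof(hC), cudaMemcpyDeviceToHost);
    printf("C = [%g %g; %g %g]\n", hC[0], hC[2], hC[1], hC[3]);   // expect A itself: [1 3; 2 4]

    cublasDestroy(handle);
    cudaFree(dA); cudaFree(dB); cudaFree(dC);
    return 0;
}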
b6ca8b21f598f8dcfb2f2927b1cfaa778ed23870.hip
// !!! This is a file automatically generated by hipify!!! /* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @generated from sparse/blas/zgeisai_trsv.cu, normal z -> s, Thu Oct 8 23:05:52 2020 */ #include "magmasparse_internal.h" #include "shuffle.cuh" //#include <hip/hip_runtime_api.h> #define PRECISION_s #define REAL #define BLOCKSIZE 256 #define WARP_SIZE 32 #define WRP 32 #define WRQ 1 #include <hip/hip_runtime.h> // for TORCH_HIP_VERSION #if (TORCH_HIP_VERSION >= 7000) #if (CUDA_ARCH >= 300) __device__ void strsv_lower_kernel_general(float *dA, float *dB, int *sizes) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB[ 2 ]; float rA[ 2 ]; int n; int k; int N = sizes[j]; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. #pragma unroll for (n = 0; n < 2; n++) rB[n] = dB[n*WARP_SIZE+idn]; // Triangular solve in regs. #pragma unroll for (k = 0; k < N; k++) { #pragma unroll for (n = 0; n < 2; n++) rA[n] = dA[k*WARP_SIZE+n*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB[k/WARP_SIZE] /= rA[k/WARP_SIZE]; float top = magmablas_sshfl(rB[k/WARP_SIZE], k%WARP_SIZE); #pragma unroll for (n = 0; n < 2; n++) if (n*WARP_SIZE+idn > k) rB[n] -= (top*rA[n]); } // Drop B to dev mem. #pragma unroll for (n = 0; n < 2; n++) if (n*WARP_SIZE+idn < N) dB[n*WARP_SIZE+idn] = rB[n]; #endif } __device__ void strsv_upper_kernel_general(float *dA, float *dB, int *sizes) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB[ 2 ]; float rA[ 2 ]; int n; int k; int N = sizes[j]; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. #pragma unroll for (n = 0; n < 2; n++) rB[n] = dB[n*WARP_SIZE+idn]; // Triangular solve in regs. #pragma unroll for (int k = N-1; k > -1; k--) { #pragma unroll for (n = 0; n < 2; n++) rA[n] = dA[k*WARP_SIZE+n*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB[k/WARP_SIZE] /= rA[k/WARP_SIZE]; float top = magmablas_sshfl(rB[k/WARP_SIZE], k%WARP_SIZE); #pragma unroll for (n = 0; n < 2; n++) if (n*WARP_SIZE+idn < k) rB[n] -= (top*rA[n]); } // Drop B to dev mem. #pragma unroll for (n = 0; n < 2; n++) if (n*WARP_SIZE+idn < N) dB[n*WARP_SIZE+idn] = rB[n]; #endif } __device__ void strsv_lower_kernel_1(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 1; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_2(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 2; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_3(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. 
rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 3; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_4(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 4; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_5(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 5; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_6(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 6; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_7(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 7; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_8(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 8; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_9(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 9; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_10(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. 
rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 10; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_11(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 11; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_12(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 12; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_13(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 13; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_14(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 14; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_15(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 15; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_16(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 16; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_17(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. 
rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 17; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_18(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 18; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_19(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 19; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_20(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 20; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_21(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 21; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_22(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 22; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_23(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 23; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_24(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. 
rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 24; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_25(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 25; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_26(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 26; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_27(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 27; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_28(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 28; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_29(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 29; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_30(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 30; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_31(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. 
rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 31; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_32(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 32; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __global__ void strsv_lower_kernel_switch(float *dA, float *dB, int *sizes, int num_rows ) { int j = blockIdx.y * gridDim.x + blockIdx.x; if (j < num_rows) { int N = sizes[j]; switch( N ) { case 1: strsv_lower_kernel_1( dA, dB ); break; case 2: strsv_lower_kernel_2( dA, dB ); break; case 3: strsv_lower_kernel_3( dA, dB ); break; case 4: strsv_lower_kernel_4( dA, dB ); break; case 5: strsv_lower_kernel_5( dA, dB ); break; case 6: strsv_lower_kernel_6( dA, dB ); break; case 7: strsv_lower_kernel_7( dA, dB ); break; case 8: strsv_lower_kernel_8( dA, dB ); break; case 9: strsv_lower_kernel_9( dA, dB ); break; case 10: strsv_lower_kernel_10( dA, dB ); break; case 11: strsv_lower_kernel_11( dA, dB ); break; case 12: strsv_lower_kernel_12( dA, dB ); break; case 13: strsv_lower_kernel_13( dA, dB ); break; case 14: strsv_lower_kernel_14( dA, dB ); break; case 15: strsv_lower_kernel_15( dA, dB ); break; case 16: strsv_lower_kernel_16( dA, dB ); break; case 17: strsv_lower_kernel_17( dA, dB ); break; case 18: strsv_lower_kernel_18( dA, dB ); break; case 19: strsv_lower_kernel_19( dA, dB ); break; case 20: strsv_lower_kernel_20( dA, dB ); break; case 21: strsv_lower_kernel_21( dA, dB ); break; case 22: strsv_lower_kernel_22( dA, dB ); break; case 23: strsv_lower_kernel_23( dA, dB ); break; case 24: strsv_lower_kernel_24( dA, dB ); break; case 25: strsv_lower_kernel_25( dA, dB ); break; case 26: strsv_lower_kernel_26( dA, dB ); break; case 27: strsv_lower_kernel_27( dA, dB ); break; case 28: strsv_lower_kernel_28( dA, dB ); break; case 29: strsv_lower_kernel_29( dA, dB ); break; case 30: strsv_lower_kernel_30( dA, dB ); break; case 31: strsv_lower_kernel_31( dA, dB ); break; case 32: strsv_lower_kernel_32( dA, dB ); break; default: strsv_lower_kernel_general( dA, dB, sizes ); break; } } } __device__ void strsv_upper_kernel_1(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 1-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_2(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. 
#pragma unroll for (int k = 2-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_3(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 3-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_4(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 4-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_5(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 5-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_6(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 6-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_7(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 7-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_8(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 8-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_9(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. 
rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 9-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_10(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 10-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_11(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 11-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_12(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 12-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_13(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 13-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_14(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 14-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_15(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 15-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_16(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 16-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_17(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 17-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_18(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 18-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_19(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 19-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_20(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 20-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_21(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 21-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_22(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. 
#pragma unroll for (int k = 22-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_23(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 23-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_24(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 24-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_25(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 25-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_26(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 26-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_27(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 27-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_28(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 28-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_29(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. 
rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 29-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_30(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 30-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_31(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 31-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_32(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 32-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __global__ void strsv_upper_kernel_switch(float *dA, float *dB, int *sizes, int num_rows ) { int j = blockIdx.y * gridDim.x + blockIdx.x; if (j < num_rows) { int N = sizes[j]; switch( N ) { case 1: strsv_upper_kernel_1( dA, dB ); break; case 2: strsv_upper_kernel_2( dA, dB ); break; case 3: strsv_upper_kernel_3( dA, dB ); break; case 4: strsv_upper_kernel_4( dA, dB ); break; case 5: strsv_upper_kernel_5( dA, dB ); break; case 6: strsv_upper_kernel_6( dA, dB ); break; case 7: strsv_upper_kernel_7( dA, dB ); break; case 8: strsv_upper_kernel_8( dA, dB ); break; case 9: strsv_upper_kernel_9( dA, dB ); break; case 10: strsv_upper_kernel_10( dA, dB ); break; case 11: strsv_upper_kernel_11( dA, dB ); break; case 12: strsv_upper_kernel_12( dA, dB ); break; case 13: strsv_upper_kernel_13( dA, dB ); break; case 14: strsv_upper_kernel_14( dA, dB ); break; case 15: strsv_upper_kernel_15( dA, dB ); break; case 16: strsv_upper_kernel_16( dA, dB ); break; case 17: strsv_upper_kernel_17( dA, dB ); break; case 18: strsv_upper_kernel_18( dA, dB ); break; case 19: strsv_upper_kernel_19( dA, dB ); break; case 20: strsv_upper_kernel_20( dA, dB ); break; case 21: strsv_upper_kernel_21( dA, dB ); break; case 22: strsv_upper_kernel_22( dA, dB ); break; case 23: strsv_upper_kernel_23( dA, dB ); break; case 24: strsv_upper_kernel_24( dA, dB ); break; case 25: strsv_upper_kernel_25( dA, dB ); break; case 26: strsv_upper_kernel_26( dA, dB ); break; case 27: strsv_upper_kernel_27( dA, dB ); break; case 28: strsv_upper_kernel_28( dA, dB ); break; case 29: strsv_upper_kernel_29( dA, dB ); break; case 30: strsv_upper_kernel_30( dA, dB ); break; case 31: strsv_upper_kernel_31( dA, dB ); break; case 32: strsv_upper_kernel_32( dA, dB ); break; default: strsv_upper_kernel_general( dA, dB, sizes ); break; } } } #endif #endif /** Purpose ------- Does all triangular solves Arguments --------- @param[in] uplotype magma_uplo_t lower or upper triangular @param[in] transtype magma_trans_t possibility for transposed matrix @param[in] diagtype magma_diag_t unit diagonal or not @param[in] L magma_s_matrix Matrix in CSR format @param[in] LC magma_s_matrix same matrix, also CSR, but col-major @param[out] sizes magma_int_t* Number of Elements that are replaced. @param[out] locations magma_int_t* Array indicating the locations. @param[out] trisystems float* trisystems @param[out] rhs float* right-hand sides @param[in] queue magma_queue_t Queue to execute in. 
    @ingroup magmasparse_saux
    ********************************************************************/

extern "C" magma_int_t
magma_smtrisolve_batched_gpu(
    magma_uplo_t uplotype,
    magma_trans_t transtype,
    magma_diag_t diagtype,
    magma_s_matrix L,
    magma_s_matrix LC,
    magma_index_t *sizes,
    magma_index_t *locations,
    float *trisystems,
    float *rhs,
    magma_queue_t queue )
{
    magma_int_t info = 0;

    int blocksize1 = WARP_SIZE;
    int blocksize2 = 1;

    int dimgrid1 = min( int( sqrt( float( LC.num_rows ))), 65535 );
    int dimgrid2 = min(magma_ceildiv( LC.num_rows, dimgrid1 ), 65535);
    int dimgrid3 = magma_ceildiv( LC.num_rows, dimgrid1*dimgrid2 );

    dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
    dim3 block( blocksize1, blocksize2, 1 );

#if (TORCH_HIP_VERSION >= 7000)
#if (CUDA_ARCH >= 300)
    if( uplotype == MagmaLower ){
        //hipProfilerStart();
        hipLaunchKernelGGL(( strsv_lower_kernel_switch), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
            trisystems, rhs, sizes, LC.num_rows );
        //hipProfilerStop();
    }
    else {
        hipLaunchKernelGGL(( strsv_upper_kernel_switch), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
            trisystems, rhs, sizes, LC.num_rows );
    }
#endif
#endif

    return info;
}
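// Standalone illustration -- not part of the generated MAGMA file above.
// Every strsv_lower/upper_kernel_N device function follows the same warp-level
// substitution scheme: each lane keeps one right-hand-side entry in a register,
// the lane owning row k divides by the diagonal, and magmablas_sshfl broadcasts
// the result so the remaining lanes can update.  The sketch below reproduces
// that scheme for one column-major lower-triangular system, assuming that
// magmablas_sshfl is a thin wrapper around a warp shuffle (plain __shfl_sync is
// used instead); the kernel name, test matrix, and driver are hypothetical.

#include <cstdio>
#include <cuda_runtime.h>

#define WARP 32

// One warp solves L*x = b for a WARP x WARP column-major lower-triangular L.
__global__ void warp_lower_trsv(const float *L, float *b, int n)
{
    int lane = threadIdx.x;                      // 0 .. WARP-1, one row per lane
    float rB = (lane < n) ? b[lane] : 0.0f;      // each lane holds one rhs entry
    for (int k = 0; k < n; k++) {
        float rA = (lane < n) ? L[k * WARP + lane] : 0.0f;  // column k of L
        if (lane == k) rB /= rA;                 // x[k] = b[k] / L(k,k)
        // broadcast x[k] from lane k; stands in for magmablas_sshfl
        float xk = __shfl_sync(0xffffffffu, rB, k);
        if (lane > k) rB -= xk * rA;             // b[i] -= L(i,k) * x[k]
    }
    if (lane < n) b[lane] = rB;                  // write the solution back
}

int main()
{
    const int n = 4;                             // active size, padded to WARP
    float hL[WARP * WARP] = {0.0f};
    float hb[WARP] = {0.0f};
    // L(i,i) = 2, L(i,j) = 1 for i > j, and b chosen so that x = (1,1,1,1)
    for (int j = 0; j < n; j++)
        for (int i = j; i < n; i++)
            hL[j * WARP + i] = (i == j) ? 2.0f : 1.0f;
    for (int i = 0; i < n; i++) hb[i] = 2.0f + i;

    float *dL, *db;
    cudaMalloc((void**)&dL, sizeof(hL));
    cudaMalloc((void**)&db, sizeof(hb));
    cudaMemcpy(dL, hL, sizeof(hL), cudaMemcpyHostToDevice);
    cudaMemcpy(db, hb, sizeof(hb), cudaMemcpyHostToDevice);

    warp_lower_trsv<<<1, WARP>>>(dL, db, n);
    cudaMemcpy(hb, db, sizeof(hb), cudaMemcpyDeviceToHost);

    for (int i = 0; i < n; i++) printf("x[%d] = %f\n", i, hb[i]);  // expect 1.0
    cudaFree(dL);
    cudaFree(db);
    return 0;
}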
b6ca8b21f598f8dcfb2f2927b1cfaa778ed23870.cu
/* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @generated from sparse/blas/zgeisai_trsv.cu, normal z -> s, Thu Oct 8 23:05:52 2020 */ #include "magmasparse_internal.h" #include "shuffle.cuh" //#include <cuda_profiler_api.h> #define PRECISION_s #define REAL #define BLOCKSIZE 256 #define WARP_SIZE 32 #define WRP 32 #define WRQ 1 #include <cuda.h> // for CUDA_VERSION #if (CUDA_VERSION >= 7000) #if (CUDA_ARCH >= 300) __device__ void strsv_lower_kernel_general(float *dA, float *dB, int *sizes) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB[ 2 ]; float rA[ 2 ]; int n; int k; int N = sizes[j]; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. #pragma unroll for (n = 0; n < 2; n++) rB[n] = dB[n*WARP_SIZE+idn]; // Triangular solve in regs. #pragma unroll for (k = 0; k < N; k++) { #pragma unroll for (n = 0; n < 2; n++) rA[n] = dA[k*WARP_SIZE+n*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB[k/WARP_SIZE] /= rA[k/WARP_SIZE]; float top = magmablas_sshfl(rB[k/WARP_SIZE], k%WARP_SIZE); #pragma unroll for (n = 0; n < 2; n++) if (n*WARP_SIZE+idn > k) rB[n] -= (top*rA[n]); } // Drop B to dev mem. #pragma unroll for (n = 0; n < 2; n++) if (n*WARP_SIZE+idn < N) dB[n*WARP_SIZE+idn] = rB[n]; #endif } __device__ void strsv_upper_kernel_general(float *dA, float *dB, int *sizes) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB[ 2 ]; float rA[ 2 ]; int n; int k; int N = sizes[j]; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. #pragma unroll for (n = 0; n < 2; n++) rB[n] = dB[n*WARP_SIZE+idn]; // Triangular solve in regs. #pragma unroll for (int k = N-1; k > -1; k--) { #pragma unroll for (n = 0; n < 2; n++) rA[n] = dA[k*WARP_SIZE+n*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB[k/WARP_SIZE] /= rA[k/WARP_SIZE]; float top = magmablas_sshfl(rB[k/WARP_SIZE], k%WARP_SIZE); #pragma unroll for (n = 0; n < 2; n++) if (n*WARP_SIZE+idn < k) rB[n] -= (top*rA[n]); } // Drop B to dev mem. #pragma unroll for (n = 0; n < 2; n++) if (n*WARP_SIZE+idn < N) dB[n*WARP_SIZE+idn] = rB[n]; #endif } __device__ void strsv_lower_kernel_1(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 1; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_2(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 2; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_3(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. 
#pragma unroll for (int k = 0; k < 3; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_4(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 4; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_5(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 5; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_6(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 6; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_7(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 7; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_8(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 8; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_9(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 9; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_10(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. 
#pragma unroll for (int k = 0; k < 10; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_11(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 11; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_12(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 12; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_13(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 13; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_14(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 14; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_15(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 15; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_16(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 16; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_17(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. 
#pragma unroll for (int k = 0; k < 17; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_18(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 18; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_19(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 19; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_20(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 20; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_21(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 21; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_22(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 22; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_23(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 23; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_24(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. 
#pragma unroll for (int k = 0; k < 24; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_25(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 25; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_26(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 26; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_27(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 27; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_28(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 28; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_29(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 29; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_30(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 30; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_31(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. 
#pragma unroll for (int k = 0; k < 31; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_lower_kernel_32(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 0; k < 32; k++) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float top = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn > k) rB -= (top*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __global__ void strsv_lower_kernel_switch(float *dA, float *dB, int *sizes, int num_rows ) { int j = blockIdx.y * gridDim.x + blockIdx.x; if (j < num_rows) { int N = sizes[j]; switch( N ) { case 1: strsv_lower_kernel_1( dA, dB ); break; case 2: strsv_lower_kernel_2( dA, dB ); break; case 3: strsv_lower_kernel_3( dA, dB ); break; case 4: strsv_lower_kernel_4( dA, dB ); break; case 5: strsv_lower_kernel_5( dA, dB ); break; case 6: strsv_lower_kernel_6( dA, dB ); break; case 7: strsv_lower_kernel_7( dA, dB ); break; case 8: strsv_lower_kernel_8( dA, dB ); break; case 9: strsv_lower_kernel_9( dA, dB ); break; case 10: strsv_lower_kernel_10( dA, dB ); break; case 11: strsv_lower_kernel_11( dA, dB ); break; case 12: strsv_lower_kernel_12( dA, dB ); break; case 13: strsv_lower_kernel_13( dA, dB ); break; case 14: strsv_lower_kernel_14( dA, dB ); break; case 15: strsv_lower_kernel_15( dA, dB ); break; case 16: strsv_lower_kernel_16( dA, dB ); break; case 17: strsv_lower_kernel_17( dA, dB ); break; case 18: strsv_lower_kernel_18( dA, dB ); break; case 19: strsv_lower_kernel_19( dA, dB ); break; case 20: strsv_lower_kernel_20( dA, dB ); break; case 21: strsv_lower_kernel_21( dA, dB ); break; case 22: strsv_lower_kernel_22( dA, dB ); break; case 23: strsv_lower_kernel_23( dA, dB ); break; case 24: strsv_lower_kernel_24( dA, dB ); break; case 25: strsv_lower_kernel_25( dA, dB ); break; case 26: strsv_lower_kernel_26( dA, dB ); break; case 27: strsv_lower_kernel_27( dA, dB ); break; case 28: strsv_lower_kernel_28( dA, dB ); break; case 29: strsv_lower_kernel_29( dA, dB ); break; case 30: strsv_lower_kernel_30( dA, dB ); break; case 31: strsv_lower_kernel_31( dA, dB ); break; case 32: strsv_lower_kernel_32( dA, dB ); break; default: strsv_lower_kernel_general( dA, dB, sizes ); break; } } } __device__ void strsv_upper_kernel_1(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 1-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_2(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 2-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_3(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 3-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_4(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 4-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_5(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 5-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_6(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 6-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_7(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 7-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_8(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 8-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_9(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. 
#pragma unroll for (int k = 9-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_10(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 10-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_11(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 11-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_12(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 12-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_13(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 13-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_14(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 14-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_15(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 15-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_16(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. 
rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 16-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_17(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 17-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_18(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 18-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_19(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 19-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_20(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 20-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_21(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 21-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_22(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 22-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. 
dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_23(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 23-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_24(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 24-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_25(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 25-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_26(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 26-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_27(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 27-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_28(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 28-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_29(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. 
#pragma unroll for (int k = 29-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_30(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 30-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_31(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 31-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __device__ void strsv_upper_kernel_32(float *dA, float *dB ) { #ifdef REAL int j = blockIdx.y * gridDim.x + blockIdx.x; int idn = threadIdx.x; float rB; float rA; dA += (j)*WARP_SIZE*WARP_SIZE; dB += (j)*WARP_SIZE; // Read B to regs. rB = dB[idn]; // Triangular solve in regs. #pragma unroll for (int k = 32-1; k >-1; k--) { rA = dA[k*WARP_SIZE+idn]; if (k%WARP_SIZE == idn) rB /= rA; float bottom = magmablas_sshfl(rB, k%WARP_SIZE); if ( idn < k) rB -= (bottom*rA); } // Drop B to dev mem. dB[idn] = rB; #endif } __global__ void strsv_upper_kernel_switch(float *dA, float *dB, int *sizes, int num_rows ) { int j = blockIdx.y * gridDim.x + blockIdx.x; if (j < num_rows) { int N = sizes[j]; switch( N ) { case 1: strsv_upper_kernel_1( dA, dB ); break; case 2: strsv_upper_kernel_2( dA, dB ); break; case 3: strsv_upper_kernel_3( dA, dB ); break; case 4: strsv_upper_kernel_4( dA, dB ); break; case 5: strsv_upper_kernel_5( dA, dB ); break; case 6: strsv_upper_kernel_6( dA, dB ); break; case 7: strsv_upper_kernel_7( dA, dB ); break; case 8: strsv_upper_kernel_8( dA, dB ); break; case 9: strsv_upper_kernel_9( dA, dB ); break; case 10: strsv_upper_kernel_10( dA, dB ); break; case 11: strsv_upper_kernel_11( dA, dB ); break; case 12: strsv_upper_kernel_12( dA, dB ); break; case 13: strsv_upper_kernel_13( dA, dB ); break; case 14: strsv_upper_kernel_14( dA, dB ); break; case 15: strsv_upper_kernel_15( dA, dB ); break; case 16: strsv_upper_kernel_16( dA, dB ); break; case 17: strsv_upper_kernel_17( dA, dB ); break; case 18: strsv_upper_kernel_18( dA, dB ); break; case 19: strsv_upper_kernel_19( dA, dB ); break; case 20: strsv_upper_kernel_20( dA, dB ); break; case 21: strsv_upper_kernel_21( dA, dB ); break; case 22: strsv_upper_kernel_22( dA, dB ); break; case 23: strsv_upper_kernel_23( dA, dB ); break; case 24: strsv_upper_kernel_24( dA, dB ); break; case 25: strsv_upper_kernel_25( dA, dB ); break; case 26: strsv_upper_kernel_26( dA, dB ); break; case 27: strsv_upper_kernel_27( dA, dB ); break; case 28: strsv_upper_kernel_28( dA, dB ); break; case 29: strsv_upper_kernel_29( dA, dB ); break; case 30: strsv_upper_kernel_30( dA, dB ); break; case 31: strsv_upper_kernel_31( dA, dB ); break; case 32: strsv_upper_kernel_32( dA, dB ); break; default: 
                strsv_upper_kernel_general( dA, dB, sizes );
                break;
        }
    }
}

#endif
#endif

/**
    Purpose
    -------
    Does all triangular solves

    Arguments
    ---------

    @param[in]
    uplotype    magma_uplo_t
                lower or upper triangular

    @param[in]
    transtype   magma_trans_t
                possibility for transposed matrix

    @param[in]
    diagtype    magma_diag_t
                unit diagonal or not

    @param[in]
    L           magma_s_matrix
                Matrix in CSR format

    @param[in]
    LC          magma_s_matrix
                same matrix, also CSR, but col-major

    @param[out]
    sizes       magma_int_t*
                Number of Elements that are replaced.

    @param[out]
    locations   magma_int_t*
                Array indicating the locations.

    @param[out]
    trisystems  float*
                trisystems

    @param[out]
    rhs         float*
                right-hand sides

    @param[in]
    queue       magma_queue_t
                Queue to execute in.

    @ingroup magmasparse_saux
    ********************************************************************/

extern "C" magma_int_t
magma_smtrisolve_batched_gpu(
    magma_uplo_t uplotype,
    magma_trans_t transtype,
    magma_diag_t diagtype,
    magma_s_matrix L,
    magma_s_matrix LC,
    magma_index_t *sizes,
    magma_index_t *locations,
    float *trisystems,
    float *rhs,
    magma_queue_t queue )
{
    magma_int_t info = 0;

    int blocksize1 = WARP_SIZE;
    int blocksize2 = 1;

    int dimgrid1 = min( int( sqrt( float( LC.num_rows ))), 65535 );
    int dimgrid2 = min(magma_ceildiv( LC.num_rows, dimgrid1 ), 65535);
    int dimgrid3 = magma_ceildiv( LC.num_rows, dimgrid1*dimgrid2 );

    dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
    dim3 block( blocksize1, blocksize2, 1 );

#if (CUDA_VERSION >= 7000)
#if (CUDA_ARCH >= 300)
    if( uplotype == MagmaLower ){
        //cudaProfilerStart();
        strsv_lower_kernel_switch<<< grid, block, 0, queue->cuda_stream() >>>(
            trisystems, rhs, sizes, LC.num_rows );
        //cudaProfilerStop();
    }
    else {
        strsv_upper_kernel_switch<<< grid, block, 0, queue->cuda_stream() >>>(
            trisystems, rhs, sizes, LC.num_rows );
    }
#endif
#endif

    return info;
}
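// Standalone illustration -- not part of MAGMA.  The batched interface above
// packs one WARP_SIZE x WARP_SIZE column-major tile per system into trisystems
// and one WARP_SIZE-entry right-hand side per system into rhs, with sizes[j]
// giving the active dimension N <= WARP_SIZE of system j.  The host-side sketch
// below solves one such packed upper-triangular system by plain backward
// substitution, e.g. to validate the GPU result; the function name, test
// system, and driver are hypothetical.

#include <cstdio>

#define WARP 32

// Backward substitution on system j of the packed batch: U is the upper
// triangle of the j-th WARP x WARP column-major tile, b is overwritten with x.
void ref_upper_trsv(const float *trisystems, float *rhs, const int *sizes, int j)
{
    const float *U = trisystems + (size_t)j * WARP * WARP;   // column-major tile
    float *b = rhs + (size_t)j * WARP;
    int N = sizes[j];
    for (int k = N - 1; k >= 0; k--) {
        b[k] /= U[k * WARP + k];                 // divide by the diagonal
        for (int i = 0; i < k; i++)
            b[i] -= U[k * WARP + i] * b[k];      // eliminate column k above it
    }
}

int main()
{
    // One 3x3 system: U = [[2,1,1],[0,2,1],[0,0,2]], b = U * (1,1,1) = (4,3,2)
    float A[WARP * WARP] = {0.0f};
    float b[WARP] = {4.0f, 3.0f, 2.0f};
    int   sizes[1] = {3};
    A[0*WARP + 0] = 2.0f;
    A[1*WARP + 0] = 1.0f;  A[1*WARP + 1] = 2.0f;
    A[2*WARP + 0] = 1.0f;  A[2*WARP + 1] = 1.0f;  A[2*WARP + 2] = 2.0f;

    ref_upper_trsv(A, b, sizes, 0);
    for (int i = 0; i < 3; i++) printf("x[%d] = %f\n", i, b[i]);  // expect 1.0
    return 0;
}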
72d0b20d52a863d34e86df20572642e94768bb96.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <chrono> #include <hip/hip_runtime.h> // 2D block size #define BSIZE 16 // Tile size in the x direction #define XTILE 20 typedef double Real; __global__ void stencil3d( const Real*__restrict__ d_psi, Real*__restrict__ d_npsi, const Real*__restrict__ d_sigmaX, const Real*__restrict__ d_sigmaY, const Real*__restrict__ d_sigmaZ, int nx, int ny, int nz) { // z is the fastest varying direction __shared__ Real sm_psi[4][BSIZE][BSIZE]; #define V0(y,z) sm_psi[pii][y][z] #define V1(y,z) sm_psi[cii][y][z] #define V2(y,z) sm_psi[nii][y][z] #define sigmaX(x,y,z,dir) d_sigmaX[ z + nz * ( y + ny * ( x + nx * dir ) ) ] #define sigmaY(x,y,z,dir) d_sigmaY[ z + nz * ( y + ny * ( x + nx * dir ) ) ] #define sigmaZ(x,y,z,dir) d_sigmaZ[ z + nz * ( y + ny * ( x + nx * dir ) ) ] #define psi(x,y,z) d_psi[ z + nz * ( (y) + ny * (x) ) ] #define npsi(x,y,z) d_npsi[ z + nz * ( (y) + ny * (x) ) ] const int tjj = threadIdx.y; const int tkk = threadIdx.x; // shift for each tile by updating device pointers d_psi = &(psi(XTILE*blockIdx.x, (BSIZE-2)*blockIdx.y, (BSIZE-2)*blockIdx.z)); d_npsi = &(npsi(XTILE*blockIdx.x, (BSIZE-2)*blockIdx.y, (BSIZE-2)*blockIdx.z)); d_sigmaX = &(sigmaX(XTILE*blockIdx.x, (BSIZE-2)*blockIdx.y, (BSIZE-2)*blockIdx.z, 0)); d_sigmaY = &(sigmaY(XTILE*blockIdx.x, (BSIZE-2)*blockIdx.y, (BSIZE-2)*blockIdx.z, 0)); d_sigmaZ = &(sigmaZ(XTILE*blockIdx.x, (BSIZE-2)*blockIdx.y, (BSIZE-2)*blockIdx.z, 0)); int nLast_x=XTILE+1; int nLast_y=(BSIZE-1); int nLast_z=(BSIZE-1); if (blockIdx.x == gridDim.x-1) nLast_x = nx-2 - XTILE * blockIdx.x + 1; if (blockIdx.y == gridDim.y-1) nLast_y = ny-2 - (BSIZE-2) * blockIdx.y + 1; if (blockIdx.z == gridDim.z-1) nLast_z = nz-2 - (BSIZE-2) * blockIdx.z + 1; if(tjj>nLast_y || tkk>nLast_z) return; // previous, current, next, and temp indices int pii,cii,nii,tii; pii=0; cii=1; nii=2; sm_psi[cii][tjj][tkk] = psi(0,tjj,tkk); sm_psi[nii][tjj][tkk] = psi(1,tjj,tkk); Real xcharge,ycharge,zcharge,dV = 0; __syncthreads(); //initial if ((tkk>0) && (tkk<nLast_z) && (tjj>0) && (tjj<nLast_y)) { Real xd=-V1(tjj,tkk) + V2(tjj,tkk); Real yd=(-V1(-1 + tjj,tkk) + V1(1 + tjj,tkk) - V2(-1 + tjj,tkk) + V2(1 + tjj,tkk))/4.; Real zd=(-V1(tjj,-1 + tkk) + V1(tjj,1 + tkk) - V2(tjj,-1 + tkk) + V2(tjj,1 + tkk))/4.; dV -= sigmaX(1,tjj,tkk,0) * xd + sigmaX(1,tjj,tkk,1) * yd + sigmaX(1,tjj,tkk,2) * zd ; } tii=pii; pii=cii; cii=nii; nii=tii; for(int ii=1;ii<nLast_x;ii++) { sm_psi[nii][tjj][tkk] = psi(ii+1,tjj,tkk); __syncthreads(); // y face current if ((tkk>0) && (tkk<nLast_z) && (tjj<nLast_y)) { Real xd=(-V0(tjj,tkk) - V0(1 + tjj,tkk) + V2(tjj,tkk) + V2(1 + tjj,tkk))/4.; Real yd=-V1(tjj,tkk) + V1(1 + tjj,tkk); Real zd=(-V1(tjj,-1 + tkk) + V1(tjj,1 + tkk) - V1(1 + tjj,-1 + tkk) + V1(1 + tjj,1 + tkk))/4.; ycharge = sigmaY(ii,tjj+1,tkk,0) * xd + sigmaY(ii,tjj+1,tkk,1) * yd + sigmaY(ii,tjj+1,tkk,2) * zd ; dV += ycharge; sm_psi[3][tjj][tkk]=ycharge; } __syncthreads(); if ((tkk>0) && (tkk<nLast_z) && (tjj>0) && (tjj<nLast_y)) dV -= sm_psi[3][tjj-1][tkk]; //bring from left __syncthreads(); // z face current if ((tkk<nLast_z) && (tjj>0) && (tjj<nLast_y)) { Real xd=(-V0(tjj,tkk) - V0(tjj,1 + tkk) + V2(tjj,tkk) + V2(tjj,1 + tkk))/4.; Real yd=(-V1(-1 + tjj,tkk) - V1(-1 + tjj,1 + tkk) + V1(1 + tjj,tkk) + V1(1 + tjj,1 + tkk))/4.; Real zd=-V1(tjj,tkk) + V1(tjj,1 + tkk); zcharge = sigmaZ(ii,tjj,tkk+1,0) * xd + sigmaZ(ii,tjj,tkk+1,1) * yd + sigmaZ(ii,tjj,tkk+1,2) * zd ; dV += zcharge; sm_psi[3][tjj][tkk]=zcharge; } 
__syncthreads(); if ((tkk>0) && (tkk<nLast_z) && (tjj>0) && (tjj<nLast_y)) dV -= sm_psi[3][tjj][tkk-1]; __syncthreads(); // x face current if ((tkk>0) && (tkk<nLast_z) && (tjj>0) && (tjj<nLast_y)) { Real xd=-V1(tjj,tkk) + V2(tjj,tkk); Real yd=(-V1(-1 + tjj,tkk) + V1(1 + tjj,tkk) - V2(-1 + tjj,tkk) + V2(1 + tjj,tkk))/4.; Real zd=(-V1(tjj,-1 + tkk) + V1(tjj,1 + tkk) - V2(tjj,-1 + tkk) + V2(tjj,1 + tkk))/4.; xcharge = sigmaX(ii+1,tjj,tkk,0) * xd + sigmaX(ii+1,tjj,tkk,1) * yd + sigmaX(ii+1,tjj,tkk,2) * zd ; dV += xcharge; npsi(ii,tjj,tkk) = dV; //store dV dV = -xcharge; //pass to the next cell in x-dir } __syncthreads(); tii=pii; pii=cii; cii=nii; nii=tii; } } int main(int argc, char* argv[]) { if (argc != 3) { printf("Usage: %s <grid dimension> <repeat>\n", argv[0]); return 1; } const int size = atoi(argv[1]); const int repeat = atoi(argv[2]); const int nx = size; const int ny = size; const int nz = size; const int vol = nx * ny * nz; printf("Grid dimension: nx=%d ny=%d nz=%d\n",nx,ny,nz); Real *d_Vm, *d_dVm, *d_sigma; // allocate and initialize Vm hipMalloc((void**)&d_Vm, sizeof(Real)*vol); Real *h_Vm = (Real*) malloc (sizeof(Real)*vol); #define h_Vm(x,y,z) h_Vm[ z + nz * ( y + ny * ( x ) ) ] for(int ii=0;ii<nx;ii++) for(int jj=0;jj<ny;jj++) for(int kk=0;kk<nz;kk++) h_Vm(ii,jj,kk) = (ii*(ny*nz) + jj * nz + kk) % 19; hipMemcpy(d_Vm, h_Vm, sizeof(Real) * vol , hipMemcpyHostToDevice); // allocate and initialize sigma hipMalloc((void**)&d_sigma,sizeof(Real)*vol*9); Real *h_sigma = (Real*) malloc(sizeof(Real)*vol*9); for (int i = 0; i < vol*9; i++) h_sigma[i] = i % 19; hipMemcpy(d_sigma, h_sigma, sizeof(Real) * vol*9, hipMemcpyHostToDevice); // reset dVm hipMalloc((void**)&d_dVm,sizeof(Real)*vol); hipMemset(d_dVm, 0, sizeof(Real) * vol); //determine block sizes int bdimz = (nz-2)/(BSIZE-2) + ((nz-2)%(BSIZE-2)==0?0:1); int bdimy = (ny-2)/(BSIZE-2) + ((ny-2)%(BSIZE-2)==0?0:1); int bdimx = (nx-2)/XTILE + ((nx-2)%XTILE==0?0:1); dim3 grids (bdimx, bdimy, bdimz); dim3 blocks (BSIZE, BSIZE, 1); // warmup hipLaunchKernelGGL(( stencil3d) , dim3(grids), dim3(blocks) , 0, 0, d_Vm, d_dVm, d_sigma, d_sigma + 3*vol, d_sigma + 6*vol, nx, ny, nz); hipDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) hipLaunchKernelGGL(( stencil3d) , dim3(grids), dim3(blocks) , 0, 0, d_Vm, d_dVm, d_sigma, d_sigma + 3*vol, d_sigma + 6*vol, nx, ny, nz); hipDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average kernel execution time: %f (s)\n", (time * 1e-9f) / repeat); // read dVm Real *h_dVm = (Real*) malloc (sizeof(Real) * vol); hipMemcpy(h_dVm, d_dVm, vol*sizeof(Real), hipMemcpyDeviceToHost); #ifdef DUMP for(int ii=0;ii<nx;ii++) for(int jj=0;jj<ny;jj++) for(int kk=0;kk<nz;kk++) printf("dVm (%d,%d,%d)=%e\n",ii,jj,kk,h_dVm[kk+nz*(jj+ny*ii)]); #endif hipFree(d_Vm); hipFree(d_dVm); hipFree(d_sigma); free(h_sigma); free(h_Vm); free(h_dVm); return 0; }
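// Standalone illustration -- not part of the benchmark above.  The benchmark
// times its kernel with std::chrono after one warmup launch and does not check
// the runtime API calls.  The sketch below shows the same warmup/repeat/average
// structure using CUDA events plus a minimal error-check macro (matching the
// .cu variant that follows); the trivial scale kernel, the CHECK macro, and the
// problem size are hypothetical stand-ins, not code from the benchmark.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CHECK(call)                                                        \
    do {                                                                   \
        cudaError_t err_ = (call);                                         \
        if (err_ != cudaSuccess) {                                         \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                    \
                    cudaGetErrorString(err_), __FILE__, __LINE__);         \
            exit(1);                                                       \
        }                                                                  \
    } while (0)

__global__ void scale(double *x, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= 2.0;
}

int main()
{
    const int n = 1 << 20, repeat = 100;
    double *d_x;
    CHECK(cudaMalloc((void**)&d_x, n * sizeof(double)));
    CHECK(cudaMemset(d_x, 0, n * sizeof(double)));

    cudaEvent_t beg, end;
    CHECK(cudaEventCreate(&beg));
    CHECK(cudaEventCreate(&end));

    scale<<<(n + 255) / 256, 256>>>(d_x, n);     // warmup, as in the benchmark
    CHECK(cudaDeviceSynchronize());

    CHECK(cudaEventRecord(beg));
    for (int i = 0; i < repeat; i++)
        scale<<<(n + 255) / 256, 256>>>(d_x, n);
    CHECK(cudaEventRecord(end));
    CHECK(cudaEventSynchronize(end));

    float ms = 0.0f;
    CHECK(cudaEventElapsedTime(&ms, beg, end));  // elapsed time in milliseconds
    printf("Average kernel execution time: %f (s)\n", ms * 1e-3f / repeat);

    CHECK(cudaEventDestroy(beg));
    CHECK(cudaEventDestroy(end));
    CHECK(cudaFree(d_x));
    return 0;
}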
72d0b20d52a863d34e86df20572642e94768bb96.cu
#include <stdio.h> #include <stdlib.h> #include <chrono> #include <cuda.h> // 2D block size #define BSIZE 16 // Tile size in the x direction #define XTILE 20 typedef double Real; __global__ void stencil3d( const Real*__restrict__ d_psi, Real*__restrict__ d_npsi, const Real*__restrict__ d_sigmaX, const Real*__restrict__ d_sigmaY, const Real*__restrict__ d_sigmaZ, int nx, int ny, int nz) { // z is the fastest varying direction __shared__ Real sm_psi[4][BSIZE][BSIZE]; #define V0(y,z) sm_psi[pii][y][z] #define V1(y,z) sm_psi[cii][y][z] #define V2(y,z) sm_psi[nii][y][z] #define sigmaX(x,y,z,dir) d_sigmaX[ z + nz * ( y + ny * ( x + nx * dir ) ) ] #define sigmaY(x,y,z,dir) d_sigmaY[ z + nz * ( y + ny * ( x + nx * dir ) ) ] #define sigmaZ(x,y,z,dir) d_sigmaZ[ z + nz * ( y + ny * ( x + nx * dir ) ) ] #define psi(x,y,z) d_psi[ z + nz * ( (y) + ny * (x) ) ] #define npsi(x,y,z) d_npsi[ z + nz * ( (y) + ny * (x) ) ] const int tjj = threadIdx.y; const int tkk = threadIdx.x; // shift for each tile by updating device pointers d_psi = &(psi(XTILE*blockIdx.x, (BSIZE-2)*blockIdx.y, (BSIZE-2)*blockIdx.z)); d_npsi = &(npsi(XTILE*blockIdx.x, (BSIZE-2)*blockIdx.y, (BSIZE-2)*blockIdx.z)); d_sigmaX = &(sigmaX(XTILE*blockIdx.x, (BSIZE-2)*blockIdx.y, (BSIZE-2)*blockIdx.z, 0)); d_sigmaY = &(sigmaY(XTILE*blockIdx.x, (BSIZE-2)*blockIdx.y, (BSIZE-2)*blockIdx.z, 0)); d_sigmaZ = &(sigmaZ(XTILE*blockIdx.x, (BSIZE-2)*blockIdx.y, (BSIZE-2)*blockIdx.z, 0)); int nLast_x=XTILE+1; int nLast_y=(BSIZE-1); int nLast_z=(BSIZE-1); if (blockIdx.x == gridDim.x-1) nLast_x = nx-2 - XTILE * blockIdx.x + 1; if (blockIdx.y == gridDim.y-1) nLast_y = ny-2 - (BSIZE-2) * blockIdx.y + 1; if (blockIdx.z == gridDim.z-1) nLast_z = nz-2 - (BSIZE-2) * blockIdx.z + 1; if(tjj>nLast_y || tkk>nLast_z) return; // previous, current, next, and temp indices int pii,cii,nii,tii; pii=0; cii=1; nii=2; sm_psi[cii][tjj][tkk] = psi(0,tjj,tkk); sm_psi[nii][tjj][tkk] = psi(1,tjj,tkk); Real xcharge,ycharge,zcharge,dV = 0; __syncthreads(); //initial if ((tkk>0) && (tkk<nLast_z) && (tjj>0) && (tjj<nLast_y)) { Real xd=-V1(tjj,tkk) + V2(tjj,tkk); Real yd=(-V1(-1 + tjj,tkk) + V1(1 + tjj,tkk) - V2(-1 + tjj,tkk) + V2(1 + tjj,tkk))/4.; Real zd=(-V1(tjj,-1 + tkk) + V1(tjj,1 + tkk) - V2(tjj,-1 + tkk) + V2(tjj,1 + tkk))/4.; dV -= sigmaX(1,tjj,tkk,0) * xd + sigmaX(1,tjj,tkk,1) * yd + sigmaX(1,tjj,tkk,2) * zd ; } tii=pii; pii=cii; cii=nii; nii=tii; for(int ii=1;ii<nLast_x;ii++) { sm_psi[nii][tjj][tkk] = psi(ii+1,tjj,tkk); __syncthreads(); // y face current if ((tkk>0) && (tkk<nLast_z) && (tjj<nLast_y)) { Real xd=(-V0(tjj,tkk) - V0(1 + tjj,tkk) + V2(tjj,tkk) + V2(1 + tjj,tkk))/4.; Real yd=-V1(tjj,tkk) + V1(1 + tjj,tkk); Real zd=(-V1(tjj,-1 + tkk) + V1(tjj,1 + tkk) - V1(1 + tjj,-1 + tkk) + V1(1 + tjj,1 + tkk))/4.; ycharge = sigmaY(ii,tjj+1,tkk,0) * xd + sigmaY(ii,tjj+1,tkk,1) * yd + sigmaY(ii,tjj+1,tkk,2) * zd ; dV += ycharge; sm_psi[3][tjj][tkk]=ycharge; } __syncthreads(); if ((tkk>0) && (tkk<nLast_z) && (tjj>0) && (tjj<nLast_y)) dV -= sm_psi[3][tjj-1][tkk]; //bring from left __syncthreads(); // z face current if ((tkk<nLast_z) && (tjj>0) && (tjj<nLast_y)) { Real xd=(-V0(tjj,tkk) - V0(tjj,1 + tkk) + V2(tjj,tkk) + V2(tjj,1 + tkk))/4.; Real yd=(-V1(-1 + tjj,tkk) - V1(-1 + tjj,1 + tkk) + V1(1 + tjj,tkk) + V1(1 + tjj,1 + tkk))/4.; Real zd=-V1(tjj,tkk) + V1(tjj,1 + tkk); zcharge = sigmaZ(ii,tjj,tkk+1,0) * xd + sigmaZ(ii,tjj,tkk+1,1) * yd + sigmaZ(ii,tjj,tkk+1,2) * zd ; dV += zcharge; sm_psi[3][tjj][tkk]=zcharge; } __syncthreads(); if ((tkk>0) && (tkk<nLast_z) && (tjj>0) && 
(tjj<nLast_y)) dV -= sm_psi[3][tjj][tkk-1]; __syncthreads(); // x face current if ((tkk>0) && (tkk<nLast_z) && (tjj>0) && (tjj<nLast_y)) { Real xd=-V1(tjj,tkk) + V2(tjj,tkk); Real yd=(-V1(-1 + tjj,tkk) + V1(1 + tjj,tkk) - V2(-1 + tjj,tkk) + V2(1 + tjj,tkk))/4.; Real zd=(-V1(tjj,-1 + tkk) + V1(tjj,1 + tkk) - V2(tjj,-1 + tkk) + V2(tjj,1 + tkk))/4.; xcharge = sigmaX(ii+1,tjj,tkk,0) * xd + sigmaX(ii+1,tjj,tkk,1) * yd + sigmaX(ii+1,tjj,tkk,2) * zd ; dV += xcharge; npsi(ii,tjj,tkk) = dV; //store dV dV = -xcharge; //pass to the next cell in x-dir } __syncthreads(); tii=pii; pii=cii; cii=nii; nii=tii; } } int main(int argc, char* argv[]) { if (argc != 3) { printf("Usage: %s <grid dimension> <repeat>\n", argv[0]); return 1; } const int size = atoi(argv[1]); const int repeat = atoi(argv[2]); const int nx = size; const int ny = size; const int nz = size; const int vol = nx * ny * nz; printf("Grid dimension: nx=%d ny=%d nz=%d\n",nx,ny,nz); Real *d_Vm, *d_dVm, *d_sigma; // allocate and initialize Vm cudaMalloc((void**)&d_Vm, sizeof(Real)*vol); Real *h_Vm = (Real*) malloc (sizeof(Real)*vol); #define h_Vm(x,y,z) h_Vm[ z + nz * ( y + ny * ( x ) ) ] for(int ii=0;ii<nx;ii++) for(int jj=0;jj<ny;jj++) for(int kk=0;kk<nz;kk++) h_Vm(ii,jj,kk) = (ii*(ny*nz) + jj * nz + kk) % 19; cudaMemcpy(d_Vm, h_Vm, sizeof(Real) * vol , cudaMemcpyHostToDevice); // allocate and initialize sigma cudaMalloc((void**)&d_sigma,sizeof(Real)*vol*9); Real *h_sigma = (Real*) malloc(sizeof(Real)*vol*9); for (int i = 0; i < vol*9; i++) h_sigma[i] = i % 19; cudaMemcpy(d_sigma, h_sigma, sizeof(Real) * vol*9, cudaMemcpyHostToDevice); // reset dVm cudaMalloc((void**)&d_dVm,sizeof(Real)*vol); cudaMemset(d_dVm, 0, sizeof(Real) * vol); //determine block sizes int bdimz = (nz-2)/(BSIZE-2) + ((nz-2)%(BSIZE-2)==0?0:1); int bdimy = (ny-2)/(BSIZE-2) + ((ny-2)%(BSIZE-2)==0?0:1); int bdimx = (nx-2)/XTILE + ((nx-2)%XTILE==0?0:1); dim3 grids (bdimx, bdimy, bdimz); dim3 blocks (BSIZE, BSIZE, 1); // warmup stencil3d <<< grids, blocks >>> ( d_Vm, d_dVm, d_sigma, d_sigma + 3*vol, d_sigma + 6*vol, nx, ny, nz); cudaDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) stencil3d <<< grids, blocks >>> ( d_Vm, d_dVm, d_sigma, d_sigma + 3*vol, d_sigma + 6*vol, nx, ny, nz); cudaDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average kernel execution time: %f (s)\n", (time * 1e-9f) / repeat); // read dVm Real *h_dVm = (Real*) malloc (sizeof(Real) * vol); cudaMemcpy(h_dVm, d_dVm, vol*sizeof(Real), cudaMemcpyDeviceToHost); #ifdef DUMP for(int ii=0;ii<nx;ii++) for(int jj=0;jj<ny;jj++) for(int kk=0;kk<nz;kk++) printf("dVm (%d,%d,%d)=%e\n",ii,jj,kk,h_dVm[kk+nz*(jj+ny*ii)]); #endif cudaFree(d_Vm); cudaFree(d_dVm); cudaFree(d_sigma); free(h_sigma); free(h_Vm); free(h_dVm); return 0; }
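
The stencil pair above is a direct hipify translation: every cudaMalloc/cudaMemset/cudaMemcpy/cudaDeviceSynchronize call maps to its hip* counterpart, and each triple-chevron launch becomes a hipLaunchKernelGGL call whose extra "0, 0" arguments are the dynamic shared-memory size and stream that the CUDA syntax leaves implicit. The sketch below is a minimal, self-contained illustration of that mapping and is not part of either file; the kernel name scale_kernel and the buffer size are invented for the example, and the HIP spellings are noted in comments.

#include <cstdio>
#include <cuda_runtime.h>

// Trivial kernel used only to demonstrate the launch syntax.
__global__ void scale_kernel(float* data, float factor, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= factor;
}

int main() {
  const int n = 1024;
  float* d_data;
  cudaMalloc((void**)&d_data, n * sizeof(float));   // HIP: hipMalloc
  cudaMemset(d_data, 0, n * sizeof(float));         // HIP: hipMemset

  dim3 block(256);
  dim3 grid((n + block.x - 1) / block.x);

  // CUDA launch; hipify rewrites this as
  //   hipLaunchKernelGGL(scale_kernel, grid, block, 0, 0, d_data, 2.0f, n);
  // where the explicit "0, 0" are shared-memory bytes and stream.
  scale_kernel<<<grid, block>>>(d_data, 2.0f, n);
  cudaDeviceSynchronize();                          // HIP: hipDeviceSynchronize

  float h_first;
  cudaMemcpy(&h_first, d_data, sizeof(float), cudaMemcpyDeviceToHost); // HIP: hipMemcpy
  printf("first element: %f\n", h_first);

  cudaFree(d_data);                                 // HIP: hipFree
  return 0;
}
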
a5f0925ded3838b062f08d76a13be536f17ae99b.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <utilities/base_fixture.hpp> // cugraph::test::create_memory_resource() #include <utilities/high_res_timer.hpp> #include <utilities/test_utilities.hpp> #include <cugraph/algorithms.hpp> #include <cugraph/graph.hpp> #include <sampling/random_walks.cuh> #include <raft/handle.hpp> #include <raft/random/rng.cuh> #include <rmm/thrust_rmm_allocator.h> #include <hip/hip_runtime_api.h> #include <thrust/random.h> #include <algorithm> #include <iterator> #include <limits> #include <numeric> #include <vector> /** * @internal * @brief Populates the device vector d_start with the starting vertex indices * to be used for each RW path specified. */ template <typename vertex_t, typename index_t> void fill_start(raft::handle_t const& handle, rmm::device_uvector<vertex_t>& d_start, index_t num_vertices) { index_t num_paths = d_start.size(); thrust::transform(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()), thrust::make_counting_iterator<index_t>(0), thrust::make_counting_iterator<index_t>(num_paths), d_start.begin(), [num_vertices] __device__(auto indx) { return indx % num_vertices; }); } /** * @internal * @brief Calls the random_walks algorithm and displays the time metrics (total * time for all requested paths, average time for each path). */ template <typename graph_vt> void output_random_walks_time(graph_vt const& graph_view, typename graph_vt::edge_type num_paths) { using vertex_t = typename graph_vt::vertex_type; using edge_t = typename graph_vt::edge_type; using weight_t = typename graph_vt::weight_type; raft::handle_t handle{}; rmm::device_uvector<vertex_t> d_start(num_paths, handle.get_stream()); vertex_t num_vertices = graph_view.get_number_of_vertices(); fill_start(handle, d_start, num_vertices); // 0-copy const device view: // cugraph::experimental::detail::device_const_vector_view<vertex_t, edge_t> d_start_view{ d_start.data(), num_paths}; edge_t max_depth{10}; HighResTimer hr_timer; std::string label("RandomWalks"); hr_timer.start(label); hipProfilerStart(); auto ret_tuple = cugraph::experimental::detail::random_walks_impl(handle, graph_view, d_start_view, max_depth); hipProfilerStop(); hr_timer.stop(); try { auto runtime = hr_timer.get_average_runtime(label); std::cout << "RW for num_paths: " << num_paths << ", runtime [ms] / path: " << runtime / num_paths << ":\n"; } catch (std::exception const& ex) { std::cerr << ex.what() << '\n'; return; } catch (...) { std::cerr << "ERROR: Unknown exception on timer label search." << '\n'; return; } hr_timer.display(std::cout); } /** * @struct RandomWalks_Usecase * @brief Used to specify input to a random_walks benchmark/profile run * * @var RandomWalks_Usecase::graph_file_full_path Computed during construction * to be an absolute path consisting of the value of the RAPIDS_DATASET_ROOT_DIR * env var and the graph_file_path constructor arg. This is initialized to an * empty string. 
* * @var RandomWalks_Usecase::test_weighted Bool representing if the specified * graph is weighted or not. This is initialized to false (unweighted). */ struct RandomWalks_Usecase { std::string graph_file_full_path{}; bool test_weighted{false}; RandomWalks_Usecase(std::string const& graph_file_path, bool test_weighted) : test_weighted(test_weighted) { if ((graph_file_path.length() > 0) && (graph_file_path[0] != '/')) { graph_file_full_path = cugraph::test::get_rapids_dataset_root_dir() + "/" + graph_file_path; } else { graph_file_full_path = graph_file_path; } }; }; /** * @brief Runs random_walks on a specified input and outputs time metrics * * Creates a graph_t instance from the configuration specified in the * RandomWalks_Usecase instance passed in (currently by reading a dataset to * populate the graph_t), then runs random_walks to generate 1, 10, and 100 * random paths and output statistics for each. * * @tparam vertex_t Type of vertex identifiers. * @tparam edge_t Type of edge identifiers. * @tparam weight_t Type of weight identifiers. * * @param[in] configuration RandomWalks_Usecase instance containing the input * file to read for constructing the graph_t. */ template <typename vertex_t, typename edge_t, typename weight_t> void run(RandomWalks_Usecase const& configuration) { raft::handle_t handle{}; cugraph::experimental::graph_t<vertex_t, edge_t, weight_t, false, false> graph(handle); std::tie(graph, std::ignore) = cugraph::test::read_graph_from_matrix_market_file<vertex_t, edge_t, weight_t, false, false>( handle, configuration.graph_file_full_path, configuration.test_weighted, false); auto graph_view = graph.view(); // FIXME: the num_paths vector might be better specified via the // configuration input instead of hardcoding here. std::vector<edge_t> v_np{1, 10, 100}; for (auto&& num_paths : v_np) { output_random_walks_time(graph_view, num_paths); } } /** * @brief Performs the random_walks benchmark/profiling run * * main function for performing the random_walks benchmark/profiling run. The * resulting executable takes the following options: "rmm_mode" which can be one * of "binning", "cuda", "pool", or "managed. "dataset" which is a path * relative to the env var RAPIDS_DATASET_ROOT_DIR to a input .mtx file to use * to populate the graph_t instance. * * To use the default values of rmm_mode=pool and * dataset=test/datasets/karate.mtx: * @code * RANDOM_WALKS_PROFILING * @endcode * * To specify managed memory and the netscience.mtx dataset (relative to a * particular RAPIDS_DATASET_ROOT_DIR setting): * @code * RANDOM_WALKS_PROFILING --rmm_mode=managed --dataset=test/datasets/netscience.mtx * @endcode * * @return An int representing a successful run. 0 indicates success. 
*/ int main(int argc, char** argv) { // Add command-line processing, provide defaults cxxopts::Options options(argv[0], " - Random Walks benchmark command line options"); options.add_options()( "rmm_mode", "RMM allocation mode", cxxopts::value<std::string>()->default_value("pool")); options.add_options()( "dataset", "dataset", cxxopts::value<std::string>()->default_value("test/datasets/karate.mtx")); auto const cmd_options = options.parse(argc, argv); auto const rmm_mode = cmd_options["rmm_mode"].as<std::string>(); auto const dataset = cmd_options["dataset"].as<std::string>(); // Configure RMM auto resource = cugraph::test::create_memory_resource(rmm_mode); rmm::mr::set_current_device_resource(resource.get()); // Run benchmarks std::cout << "Using dataset: " << dataset << std::endl; run<int32_t, int32_t, float>(RandomWalks_Usecase(dataset, true)); // FIXME: consider returning non-zero for situations that warrant it (eg. if // the algo ran but the results are invalid, if a benchmark threshold is // exceeded, etc.) return 0; }
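
fill_start in the file above populates the start-vertex vector with thrust::transform over a pair of counting iterators, assigning index modulo num_vertices via an extended device lambda. The stand-alone sketch below shows the same pattern with a functor instead of the device lambda (so it does not depend on the extended-lambda compiler flag the original relies on); the names mod_start, num_paths, and num_vertices values are chosen only for the example.

#include <cstdio>
#include <thrust/device_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/transform.h>

// Functor version of the modulo mapping performed by fill_start's device lambda.
struct mod_start {
  int num_vertices;
  __host__ __device__ int operator()(int indx) const { return indx % num_vertices; }
};

int main() {
  const int num_paths = 8;
  const int num_vertices = 5;

  thrust::device_vector<int> d_start(num_paths);
  thrust::transform(thrust::make_counting_iterator<int>(0),
                    thrust::make_counting_iterator<int>(num_paths),
                    d_start.begin(),
                    mod_start{num_vertices});

  // Expected output: 0 1 2 3 4 0 1 2
  for (int i = 0; i < num_paths; i++)
    printf("%d ", (int)d_start[i]);
  printf("\n");
  return 0;
}
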
a5f0925ded3838b062f08d76a13be536f17ae99b.cu
/* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <utilities/base_fixture.hpp> // cugraph::test::create_memory_resource() #include <utilities/high_res_timer.hpp> #include <utilities/test_utilities.hpp> #include <cugraph/algorithms.hpp> #include <cugraph/graph.hpp> #include <sampling/random_walks.cuh> #include <raft/handle.hpp> #include <raft/random/rng.cuh> #include <rmm/thrust_rmm_allocator.h> #include <cuda_profiler_api.h> #include <thrust/random.h> #include <algorithm> #include <iterator> #include <limits> #include <numeric> #include <vector> /** * @internal * @brief Populates the device vector d_start with the starting vertex indices * to be used for each RW path specified. */ template <typename vertex_t, typename index_t> void fill_start(raft::handle_t const& handle, rmm::device_uvector<vertex_t>& d_start, index_t num_vertices) { index_t num_paths = d_start.size(); thrust::transform(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()), thrust::make_counting_iterator<index_t>(0), thrust::make_counting_iterator<index_t>(num_paths), d_start.begin(), [num_vertices] __device__(auto indx) { return indx % num_vertices; }); } /** * @internal * @brief Calls the random_walks algorithm and displays the time metrics (total * time for all requested paths, average time for each path). */ template <typename graph_vt> void output_random_walks_time(graph_vt const& graph_view, typename graph_vt::edge_type num_paths) { using vertex_t = typename graph_vt::vertex_type; using edge_t = typename graph_vt::edge_type; using weight_t = typename graph_vt::weight_type; raft::handle_t handle{}; rmm::device_uvector<vertex_t> d_start(num_paths, handle.get_stream()); vertex_t num_vertices = graph_view.get_number_of_vertices(); fill_start(handle, d_start, num_vertices); // 0-copy const device view: // cugraph::experimental::detail::device_const_vector_view<vertex_t, edge_t> d_start_view{ d_start.data(), num_paths}; edge_t max_depth{10}; HighResTimer hr_timer; std::string label("RandomWalks"); hr_timer.start(label); cudaProfilerStart(); auto ret_tuple = cugraph::experimental::detail::random_walks_impl(handle, graph_view, d_start_view, max_depth); cudaProfilerStop(); hr_timer.stop(); try { auto runtime = hr_timer.get_average_runtime(label); std::cout << "RW for num_paths: " << num_paths << ", runtime [ms] / path: " << runtime / num_paths << ":\n"; } catch (std::exception const& ex) { std::cerr << ex.what() << '\n'; return; } catch (...) { std::cerr << "ERROR: Unknown exception on timer label search." << '\n'; return; } hr_timer.display(std::cout); } /** * @struct RandomWalks_Usecase * @brief Used to specify input to a random_walks benchmark/profile run * * @var RandomWalks_Usecase::graph_file_full_path Computed during construction * to be an absolute path consisting of the value of the RAPIDS_DATASET_ROOT_DIR * env var and the graph_file_path constructor arg. This is initialized to an * empty string. 
* * @var RandomWalks_Usecase::test_weighted Bool representing if the specified * graph is weighted or not. This is initialized to false (unweighted). */ struct RandomWalks_Usecase { std::string graph_file_full_path{}; bool test_weighted{false}; RandomWalks_Usecase(std::string const& graph_file_path, bool test_weighted) : test_weighted(test_weighted) { if ((graph_file_path.length() > 0) && (graph_file_path[0] != '/')) { graph_file_full_path = cugraph::test::get_rapids_dataset_root_dir() + "/" + graph_file_path; } else { graph_file_full_path = graph_file_path; } }; }; /** * @brief Runs random_walks on a specified input and outputs time metrics * * Creates a graph_t instance from the configuration specified in the * RandomWalks_Usecase instance passed in (currently by reading a dataset to * populate the graph_t), then runs random_walks to generate 1, 10, and 100 * random paths and output statistics for each. * * @tparam vertex_t Type of vertex identifiers. * @tparam edge_t Type of edge identifiers. * @tparam weight_t Type of weight identifiers. * * @param[in] configuration RandomWalks_Usecase instance containing the input * file to read for constructing the graph_t. */ template <typename vertex_t, typename edge_t, typename weight_t> void run(RandomWalks_Usecase const& configuration) { raft::handle_t handle{}; cugraph::experimental::graph_t<vertex_t, edge_t, weight_t, false, false> graph(handle); std::tie(graph, std::ignore) = cugraph::test::read_graph_from_matrix_market_file<vertex_t, edge_t, weight_t, false, false>( handle, configuration.graph_file_full_path, configuration.test_weighted, false); auto graph_view = graph.view(); // FIXME: the num_paths vector might be better specified via the // configuration input instead of hardcoding here. std::vector<edge_t> v_np{1, 10, 100}; for (auto&& num_paths : v_np) { output_random_walks_time(graph_view, num_paths); } } /** * @brief Performs the random_walks benchmark/profiling run * * main function for performing the random_walks benchmark/profiling run. The * resulting executable takes the following options: "rmm_mode" which can be one * of "binning", "cuda", "pool", or "managed. "dataset" which is a path * relative to the env var RAPIDS_DATASET_ROOT_DIR to a input .mtx file to use * to populate the graph_t instance. * * To use the default values of rmm_mode=pool and * dataset=test/datasets/karate.mtx: * @code * RANDOM_WALKS_PROFILING * @endcode * * To specify managed memory and the netscience.mtx dataset (relative to a * particular RAPIDS_DATASET_ROOT_DIR setting): * @code * RANDOM_WALKS_PROFILING --rmm_mode=managed --dataset=test/datasets/netscience.mtx * @endcode * * @return An int representing a successful run. 0 indicates success. 
*/ int main(int argc, char** argv) { // Add command-line processing, provide defaults cxxopts::Options options(argv[0], " - Random Walks benchmark command line options"); options.add_options()( "rmm_mode", "RMM allocation mode", cxxopts::value<std::string>()->default_value("pool")); options.add_options()( "dataset", "dataset", cxxopts::value<std::string>()->default_value("test/datasets/karate.mtx")); auto const cmd_options = options.parse(argc, argv); auto const rmm_mode = cmd_options["rmm_mode"].as<std::string>(); auto const dataset = cmd_options["dataset"].as<std::string>(); // Configure RMM auto resource = cugraph::test::create_memory_resource(rmm_mode); rmm::mr::set_current_device_resource(resource.get()); // Run benchmarks std::cout << "Using dataset: " << dataset << std::endl; run<int32_t, int32_t, float>(RandomWalks_Usecase(dataset, true)); // FIXME: consider returning non-zero for situations that warrant it (eg. if // the algo ran but the results are invalid, if a benchmark threshold is // exceeded, etc.) return 0; }
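
Both versions of this benchmark bracket only the random_walks_impl call with cudaProfilerStart()/cudaProfilerStop() (hipProfilerStart/Stop after hipify) while timing it with HighResTimer, so a profiler started with capture disabled records just that region rather than graph construction. Below is a minimal stand-alone sketch of the same bracketing pattern; dummy_work and the chrono-based timing are placeholders for the real algorithm and timer used above.

#include <chrono>
#include <cstdio>
#include <cuda_profiler_api.h>   // cudaProfilerStart / cudaProfilerStop
#include <cuda_runtime.h>

// Stand-in for the real workload (random_walks_impl in the benchmark above).
__global__ void dummy_work(float* buf, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) buf[i] = sqrtf(static_cast<float>(i));
}

int main() {
  const int n = 1 << 20;
  float* d_buf;
  cudaMalloc((void**)&d_buf, n * sizeof(float));

  auto start = std::chrono::steady_clock::now();
  cudaProfilerStart();                 // profiler capture begins here
  dummy_work<<<(n + 255) / 256, 256>>>(d_buf, n);
  cudaDeviceSynchronize();
  cudaProfilerStop();                  // ...and ends here
  auto end = std::chrono::steady_clock::now();

  double ms = std::chrono::duration<double, std::milli>(end - start).count();
  printf("profiled region: %.3f ms\n", ms);

  cudaFree(d_buf);
  return 0;
}

Running under a profiler with collection initially off (e.g. nvprof --profile-from-start off, or an Nsight Systems capture range keyed to the profiler API) then limits data collection to the bracketed region.
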
322d7608126ca772097e99339ba33770a34de5e5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <time.h> #include <fstream> #include "renderer.hh" #include "vector3.hh" #include "util.hh" // limited version of checkCudaErrors from helper_cuda.h in CUDA examples #define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ ) void check_cuda(hipError_t result, char const *const func, const char *const file, int const line) { if (result) { std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " << file << ":" << line << " '" << func << "' \n"; // Make sure we call CUDA Device Reset before exiting hipDeviceReset(); exit(99); } } int main() { // Render Settings int width = 1280; int height = 720; // Cuda Settings int tx = 8; // BlockX size int ty = 8; // BlockY size int num_pixels = width * height; int buffer_size = num_pixels * sizeof(Color); // Allocate FrameBuffer Color* frameBuffer; checkCudaErrors(hipMallocManaged((void **)&frameBuffer, buffer_size)); // Allocate Random States hiprandState_t* d_rand_state; checkCudaErrors(hipMalloc((void **)&d_rand_state, num_pixels*sizeof(hiprandState_t))); // Creating Scene Scene** scene; checkCudaErrors(hipMalloc((void **)&scene, sizeof(Scene*))); Renderer** renderer; checkCudaErrors(hipMalloc((void **)&renderer, sizeof(Renderer*))); Camera** camera; checkCudaErrors(hipMalloc((void **)&camera, sizeof(Camera*))); hipLaunchKernelGGL(( setupScene), dim3(1),dim3(1), 0, 0, renderer, scene, camera, width, height); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); // Setting Up Rendering dim3 blocks(width/tx+1,height/ty+1); dim3 threads(tx,ty); hipLaunchKernelGGL(( random_init), dim3(blocks), dim3(threads), 0, 0, width, height, d_rand_state); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); // Render our buffer std::cerr << "Rendering a " << width << "x" << height << " image with "; std::cerr << "in " << tx << "x" << ty << " blocks." << std::endl; clock_t start, stop; start = clock(); hipLaunchKernelGGL(( renderScene), dim3(blocks), dim3(threads), 0, 0, frameBuffer, renderer, d_rand_state); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); stop = clock(); double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC; std::cerr << "took " << timer_seconds << " seconds." << std::endl; std::ofstream file{"file.ppm", std::ios::out | std::ios::binary}; file << "P3\n" << width << " " << height << "\n255\n"; for (int j = 0; j < height; j++) { for (int i = 0; i < width; i++) { size_t pixel_index = j * width + i; auto color = frameBuffer[pixel_index]; //color = color.clamp(); int ir = int(color.r); int ig = int(color.g); int ib = int(color.b); file << ir << " " << ig << " " << ib << "\n"; } } // Free render buffers checkCudaErrors(hipFree(d_rand_state)); checkCudaErrors(hipFree(frameBuffer)); // Free scene // Missing: kernel to free inside scene pointers checkCudaErrors(hipFree(scene)); checkCudaErrors(hipFree(renderer)); checkCudaErrors(hipFree(camera)); // Reset Device hipDeviceReset(); }
322d7608126ca772097e99339ba33770a34de5e5.cu
#include <iostream> #include <time.h> #include <fstream> #include "renderer.hh" #include "vector3.hh" #include "util.hh" // limited version of checkCudaErrors from helper_cuda.h in CUDA examples #define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ ) void check_cuda(cudaError_t result, char const *const func, const char *const file, int const line) { if (result) { std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " << file << ":" << line << " '" << func << "' \n"; // Make sure we call CUDA Device Reset before exiting cudaDeviceReset(); exit(99); } } int main() { // Render Settings int width = 1280; int height = 720; // Cuda Settings int tx = 8; // BlockX size int ty = 8; // BlockY size int num_pixels = width * height; int buffer_size = num_pixels * sizeof(Color); // Allocate FrameBuffer Color* frameBuffer; checkCudaErrors(cudaMallocManaged((void **)&frameBuffer, buffer_size)); // Allocate Random States curandState* d_rand_state; checkCudaErrors(cudaMalloc((void **)&d_rand_state, num_pixels*sizeof(curandState))); // Creating Scene Scene** scene; checkCudaErrors(cudaMalloc((void **)&scene, sizeof(Scene*))); Renderer** renderer; checkCudaErrors(cudaMalloc((void **)&renderer, sizeof(Renderer*))); Camera** camera; checkCudaErrors(cudaMalloc((void **)&camera, sizeof(Camera*))); setupScene<<<1,1>>>(renderer, scene, camera, width, height); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); // Setting Up Rendering dim3 blocks(width/tx+1,height/ty+1); dim3 threads(tx,ty); random_init<<<blocks, threads>>>(width, height, d_rand_state); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); // Render our buffer std::cerr << "Rendering a " << width << "x" << height << " image with "; std::cerr << "in " << tx << "x" << ty << " blocks." << std::endl; clock_t start, stop; start = clock(); renderScene<<<blocks, threads>>>(frameBuffer, renderer, d_rand_state); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); stop = clock(); double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC; std::cerr << "took " << timer_seconds << " seconds." << std::endl; std::ofstream file{"file.ppm", std::ios::out | std::ios::binary}; file << "P3\n" << width << " " << height << "\n255\n"; for (int j = 0; j < height; j++) { for (int i = 0; i < width; i++) { size_t pixel_index = j * width + i; auto color = frameBuffer[pixel_index]; //color = color.clamp(); int ir = int(color.r); int ig = int(color.g); int ib = int(color.b); file << ir << " " << ig << " " << ib << "\n"; } } // Free render buffers checkCudaErrors(cudaFree(d_rand_state)); checkCudaErrors(cudaFree(frameBuffer)); // Free scene // Missing: kernel to free inside scene pointers checkCudaErrors(cudaFree(scene)); checkCudaErrors(cudaFree(renderer)); checkCudaErrors(cudaFree(camera)); // Reset Device cudaDeviceReset(); }
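
The renderer above measures kernel time with clock() around an explicit cudaDeviceSynchronize(), i.e. host wall-clock time. A commonly used alternative, shown here only as a hedged sketch and not used in the file itself, is CUDA event timing, which reports elapsed GPU time between two recorded events; busy_kernel and the buffer size are placeholders for renderScene and the frame buffer.

#include <cstdio>
#include <cuda_runtime.h>

// Placeholder kernel; in the renderer this would be renderScene.
__global__ void busy_kernel(float* out, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = static_cast<float>(i) * 0.5f;
}

int main() {
  const int n = 1280 * 720;
  float* d_out;
  cudaMalloc((void**)&d_out, n * sizeof(float));

  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  cudaEventRecord(start);
  busy_kernel<<<(n + 255) / 256, 256>>>(d_out, n);
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);               // wait until the kernel and stop event finish

  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);   // elapsed GPU time in milliseconds
  printf("kernel took %.3f ms\n", ms);

  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(d_out);
  return 0;
}
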
42a3d4ed9a375de2f70e468da01b432aebd7872b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

#define ARRAY_COUNT 10

__shared__ float file_shared_array_static[ARRAY_COUNT];
extern __shared__ float file_shared_array_dynamic[];

__global__ void generateArrayStatic(float* out) {
  __shared__ float array[ARRAY_COUNT];
  int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (idx < ARRAY_COUNT) {
    array[idx] = idx;
    out[idx] = array[idx];
  }
}

__global__ void generateArrayDynamic(float* out) {
  extern __shared__ float array[];
  int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (idx < ARRAY_COUNT) {
    array[idx] = idx;
    out[idx] = array[idx];
  }
}

__global__ void generateArrayFileStatic(float* out) {
  int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (idx < ARRAY_COUNT) {
    file_shared_array_static[idx] = idx;
    out[idx] = file_shared_array_static[idx];
  }
}

__global__ void generateArrayFileDynamic(float* out) {
  int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (idx < ARRAY_COUNT) {
    file_shared_array_dynamic[idx] = idx;
    out[idx] = file_shared_array_dynamic[idx];
  }
}

void printArray(float* array) {
  for (int x = 0; x < ARRAY_COUNT; x++) {
    printf("%d ", int(array[x]));
  }
  printf("\n");
}

int main(void) {
  printf("\n");

  dim3 block(32);
  dim3 grid((ARRAY_COUNT+block.x-1)/block.x);

  float* host_array = (float*)malloc(ARRAY_COUNT*sizeof(float));
  float* device_array;
  hipMalloc((float**)&device_array, ARRAY_COUNT*sizeof(float));

  hipLaunchKernelGGL(( generateArrayStatic), dim3(grid),dim3(block), 0, 0, device_array);
  hipDeviceSynchronize();
  hipMemcpy(host_array, device_array, ARRAY_COUNT*sizeof(float), hipMemcpyDeviceToHost);
  printf("%-30s", "generateArrayStatic: ");
  printArray(host_array);

  hipLaunchKernelGGL(( generateArrayDynamic), dim3(grid),dim3(block),ARRAY_COUNT*sizeof(float), 0, device_array);
  hipDeviceSynchronize();
  hipMemcpy(host_array, device_array, ARRAY_COUNT*sizeof(float), hipMemcpyDeviceToHost);
  printf("%-30s", "generateArrayDynamic: ");
  printArray(host_array);

  hipLaunchKernelGGL(( generateArrayFileStatic), dim3(grid),dim3(block), 0, 0, device_array);
  hipDeviceSynchronize();
  hipMemcpy(host_array, device_array, ARRAY_COUNT*sizeof(float), hipMemcpyDeviceToHost);
  printf("%-30s", "generateArrayFileStatic: ");
  printArray(host_array);

  hipLaunchKernelGGL(( generateArrayFileDynamic), dim3(grid),dim3(block),ARRAY_COUNT*sizeof(float), 0, device_array);
  hipDeviceSynchronize();
  hipMemcpy(host_array, device_array, ARRAY_COUNT*sizeof(float), hipMemcpyDeviceToHost);
  printf("%-30s", "generateArrayFileDynamic: ");
  printArray(host_array);

  free(host_array);
  hipFree(device_array);

  hipDeviceReset();
  printf("\n");
  return 0;
}
42a3d4ed9a375de2f70e468da01b432aebd7872b.cu
#include <stdio.h>

#define ARRAY_COUNT 10

__shared__ float file_shared_array_static[ARRAY_COUNT];
extern __shared__ float file_shared_array_dynamic[];

__global__ void generateArrayStatic(float* out) {
  __shared__ float array[ARRAY_COUNT];
  int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (idx < ARRAY_COUNT) {
    array[idx] = idx;
    out[idx] = array[idx];
  }
}

__global__ void generateArrayDynamic(float* out) {
  extern __shared__ float array[];
  int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (idx < ARRAY_COUNT) {
    array[idx] = idx;
    out[idx] = array[idx];
  }
}

__global__ void generateArrayFileStatic(float* out) {
  int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (idx < ARRAY_COUNT) {
    file_shared_array_static[idx] = idx;
    out[idx] = file_shared_array_static[idx];
  }
}

__global__ void generateArrayFileDynamic(float* out) {
  int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (idx < ARRAY_COUNT) {
    file_shared_array_dynamic[idx] = idx;
    out[idx] = file_shared_array_dynamic[idx];
  }
}

void printArray(float* array) {
  for (int x = 0; x < ARRAY_COUNT; x++) {
    printf("%d ", int(array[x]));
  }
  printf("\n");
}

int main(void) {
  printf("\n");

  dim3 block(32);
  dim3 grid((ARRAY_COUNT+block.x-1)/block.x);

  float* host_array = (float*)malloc(ARRAY_COUNT*sizeof(float));
  float* device_array;
  cudaMalloc((float**)&device_array, ARRAY_COUNT*sizeof(float));

  generateArrayStatic<<<grid,block>>>(device_array);
  cudaDeviceSynchronize();
  cudaMemcpy(host_array, device_array, ARRAY_COUNT*sizeof(float), cudaMemcpyDeviceToHost);
  printf("%-30s", "generateArrayStatic: ");
  printArray(host_array);

  generateArrayDynamic<<<grid,block,ARRAY_COUNT*sizeof(float)>>>(device_array);
  cudaDeviceSynchronize();
  cudaMemcpy(host_array, device_array, ARRAY_COUNT*sizeof(float), cudaMemcpyDeviceToHost);
  printf("%-30s", "generateArrayDynamic: ");
  printArray(host_array);

  generateArrayFileStatic<<<grid,block>>>(device_array);
  cudaDeviceSynchronize();
  cudaMemcpy(host_array, device_array, ARRAY_COUNT*sizeof(float), cudaMemcpyDeviceToHost);
  printf("%-30s", "generateArrayFileStatic: ");
  printArray(host_array);

  generateArrayFileDynamic<<<grid,block,ARRAY_COUNT*sizeof(float)>>>(device_array);
  cudaDeviceSynchronize();
  cudaMemcpy(host_array, device_array, ARRAY_COUNT*sizeof(float), cudaMemcpyDeviceToHost);
  printf("%-30s", "generateArrayFileDynamic: ");
  printArray(host_array);

  free(host_array);
  cudaFree(device_array);

  cudaDeviceReset();
  printf("\n");
  return 0;
}
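
The pair above exercises dynamic shared memory: the extern __shared__ array gets its size from the third launch-configuration argument (ARRAY_COUNT*sizeof(float)), which hipify carries over as the fourth argument of hipLaunchKernelGGL. A typical use of that mechanism, sketched below under the assumption of a single block, is a block-wide reduction whose scratch space is sized at launch time; block_sum and the input size are invented for this example.

#include <cstdio>
#include <cuda_runtime.h>

// Sums n floats with a single block, using dynamically sized shared memory.
__global__ void block_sum(const float* in, float* out, int n) {
  extern __shared__ float scratch[];          // size fixed at launch time
  int tid = threadIdx.x;

  float acc = 0.0f;
  for (int i = tid; i < n; i += blockDim.x)   // strided accumulation within the block
    acc += in[i];
  scratch[tid] = acc;
  __syncthreads();

  for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
    if (tid < stride) scratch[tid] += scratch[tid + stride];
    __syncthreads();
  }
  if (tid == 0) *out = scratch[0];
}

int main() {
  const int n = 1000;
  const int threads = 256;                    // power of two for the tree reduction

  float* h_in = (float*)malloc(n * sizeof(float));
  for (int i = 0; i < n; i++) h_in[i] = 1.0f; // expected sum: 1000

  float *d_in, *d_out;
  cudaMalloc((void**)&d_in, n * sizeof(float));
  cudaMalloc((void**)&d_out, sizeof(float));
  cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);

  // Third launch parameter = dynamic shared-memory bytes, as in the files above.
  block_sum<<<1, threads, threads * sizeof(float)>>>(d_in, d_out, n);

  float h_out = 0.0f;
  cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
  printf("sum = %f\n", h_out);

  cudaFree(d_in);
  cudaFree(d_out);
  free(h_in);
  return 0;
}
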
5f15c85cb3d16b41fe2f056f32bf8296290ecc12.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2017 Zheyong Fan, Ville Vierimaa, and Ari Harju This file is part of GPUQT. GPUQT is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. GPUQT is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GPUQT. If not, see <http://www.gnu.org/licenses/>. */ #include "hamiltonian.h" #include "model.h" #include "vector.h" #include <string.h> // memcpy #define BLOCK_SIZE 256 #ifndef CPU_ONLY void Hamiltonian::initialize_gpu(Model& model) { n = model.number_of_atoms; max_neighbor = model.max_neighbor; energy_max = model.energy_max; grid_size = (model.number_of_atoms - 1) / BLOCK_SIZE + 1; CHECK(hipMalloc((void**)&neighbor_number, sizeof(int) * n)); CHECK(hipMalloc((void**)&neighbor_list, sizeof(int) * model.number_of_pairs)); CHECK(hipMalloc((void**)&potential, sizeof(real) * n)); CHECK(hipMalloc((void**)&hopping_real, sizeof(real) * model.number_of_pairs)); CHECK(hipMalloc((void**)&hopping_imag, sizeof(real) * model.number_of_pairs)); CHECK(hipMalloc((void**)&xx, sizeof(real) * model.number_of_pairs)); CHECK( hipMemcpy(neighbor_number, model.neighbor_number, sizeof(int) * n, hipMemcpyHostToDevice)); delete[] model.neighbor_number; CHECK(hipMemcpy(potential, model.potential, sizeof(real) * n, hipMemcpyHostToDevice)); delete[] model.potential; int* neighbor_list_new = new int[model.number_of_pairs]; for (int m = 0; m < max_neighbor; ++m) { for (int i = 0; i < n; ++i) { neighbor_list_new[m * n + i] = model.neighbor_list[i * max_neighbor + m]; } } delete[] model.neighbor_list; CHECK(hipMemcpy( neighbor_list, neighbor_list_new, sizeof(int) * model.number_of_pairs, hipMemcpyHostToDevice)); delete[] neighbor_list_new; real* hopping_real_new = new real[model.number_of_pairs]; for (int m = 0; m < max_neighbor; ++m) { for (int i = 0; i < n; ++i) { hopping_real_new[m * n + i] = model.hopping_real[i * max_neighbor + m]; } } delete[] model.hopping_real; CHECK(hipMemcpy( hopping_real, hopping_real_new, sizeof(real) * model.number_of_pairs, hipMemcpyHostToDevice)); delete[] hopping_real_new; real* hopping_imag_new = new real[model.number_of_pairs]; for (int m = 0; m < max_neighbor; ++m) { for (int i = 0; i < n; ++i) { hopping_imag_new[m * n + i] = model.hopping_imag[i * max_neighbor + m]; } } delete[] model.hopping_imag; CHECK(hipMemcpy( hopping_imag, hopping_imag_new, sizeof(real) * model.number_of_pairs, hipMemcpyHostToDevice)); delete[] hopping_imag_new; real* xx_new = new real[model.number_of_pairs]; for (int m = 0; m < max_neighbor; ++m) { for (int i = 0; i < n; ++i) { xx_new[m * n + i] = model.xx[i * max_neighbor + m]; } } delete[] model.xx; CHECK(hipMemcpy(xx, xx_new, sizeof(real) * model.number_of_pairs, hipMemcpyHostToDevice)); delete[] xx_new; } #else void Hamiltonian::initialize_cpu(Model& model) { n = model.number_of_atoms; max_neighbor = model.max_neighbor; energy_max = model.energy_max; int number_of_pairs = model.number_of_pairs; neighbor_number = new int[n]; memcpy(neighbor_number, model.neighbor_number, sizeof(int) * n); delete[] model.neighbor_number; neighbor_list = new 
int[number_of_pairs]; memcpy(neighbor_list, model.neighbor_list, sizeof(int) * number_of_pairs); delete[] model.neighbor_list; potential = new real[n]; memcpy(potential, model.potential, sizeof(real) * n); delete[] model.potential; hopping_real = new real[number_of_pairs]; memcpy(hopping_real, model.hopping_real, sizeof(real) * number_of_pairs); delete[] model.hopping_real; hopping_imag = new real[number_of_pairs]; memcpy(hopping_imag, model.hopping_imag, sizeof(real) * number_of_pairs); delete[] model.hopping_imag; xx = new real[number_of_pairs]; memcpy(xx, model.xx, sizeof(real) * number_of_pairs); delete[] model.xx; } #endif Hamiltonian::Hamiltonian(Model& model) { #ifndef CPU_ONLY initialize_gpu(model); #else initialize_cpu(model); #endif } Hamiltonian::~Hamiltonian() { #ifndef CPU_ONLY CHECK(hipFree(neighbor_number)); CHECK(hipFree(neighbor_list)); CHECK(hipFree(potential)); CHECK(hipFree(hopping_real)); CHECK(hipFree(hopping_imag)); CHECK(hipFree(xx)); #else delete[] neighbor_number; delete[] neighbor_list; delete[] potential; delete[] hopping_real; delete[] hopping_imag; delete[] xx; #endif } #ifndef CPU_ONLY __global__ void gpu_apply_hamiltonian( const int number_of_atoms, const real energy_max, const int* __restrict__ g_neighbor_number, const int* __restrict__ g_neighbor_list, const real* __restrict__ g_potential, const real* __restrict__ g_hopping_real, const real* __restrict__ g_hopping_imag, const real* __restrict__ g_state_in_real, const real* __restrict__ g_state_in_imag, real* __restrict__ g_state_out_real, real* __restrict__ g_state_out_imag) { int n = blockIdx.x * blockDim.x + threadIdx.x; if (n < number_of_atoms) { real temp_real = g_potential[n] * g_state_in_real[n]; // on-site real temp_imag = g_potential[n] * g_state_in_imag[n]; // on-site for (int m = 0; m < g_neighbor_number[n]; ++m) { int index_1 = m * number_of_atoms + n; int index_2 = g_neighbor_list[index_1]; real a = g_hopping_real[index_1]; real b = g_hopping_imag[index_1]; real c = g_state_in_real[index_2]; real d = g_state_in_imag[index_2]; temp_real += a * c - b * d; // hopping temp_imag += a * d + b * c; // hopping } temp_real /= energy_max; // scale temp_imag /= energy_max; // scale g_state_out_real[n] = temp_real; g_state_out_imag[n] = temp_imag; } } #else void cpu_apply_hamiltonian( int number_of_atoms, int max_neighbor, real energy_max, int* g_neighbor_number, int* g_neighbor_list, real* g_potential, real* g_hopping_real, real* g_hopping_imag, real* g_state_in_real, real* g_state_in_imag, real* g_state_out_real, real* g_state_out_imag) { for (int n = 0; n < number_of_atoms; ++n) { real temp_real = g_potential[n] * g_state_in_real[n]; // on-site real temp_imag = g_potential[n] * g_state_in_imag[n]; // on-site for (int m = 0; m < g_neighbor_number[n]; ++m) { int index_1 = n * max_neighbor + m; int index_2 = g_neighbor_list[index_1]; real a = g_hopping_real[index_1]; real b = g_hopping_imag[index_1]; real c = g_state_in_real[index_2]; real d = g_state_in_imag[index_2]; temp_real += a * c - b * d; // hopping temp_imag += a * d + b * c; // hopping } temp_real /= energy_max; // scale temp_imag /= energy_max; // scale g_state_out_real[n] = temp_real; g_state_out_imag[n] = temp_imag; } } #endif // |output> = H |input> void Hamiltonian::apply(Vector& input, Vector& output) { #ifndef CPU_ONLY hipLaunchKernelGGL(gpu_apply_hamiltonian, dim3(grid_size), dim3(BLOCK_SIZE), 0, 0, n, energy_max, neighbor_number, neighbor_list, potential, hopping_real, hopping_imag, input.real_part, input.imag_part, output.real_part, 
output.imag_part); CHECK(hipGetLastError()); #else cpu_apply_hamiltonian( n, max_neighbor, energy_max, neighbor_number, neighbor_list, potential, hopping_real, hopping_imag, input.real_part, input.imag_part, output.real_part, output.imag_part); #endif } #ifndef CPU_ONLY __global__ void gpu_apply_commutator( int number_of_atoms, real energy_max, int* g_neighbor_number, int* g_neighbor_list, real* g_hopping_real, real* g_hopping_imag, real* g_xx, real* g_state_in_real, real* g_state_in_imag, real* g_state_out_real, real* g_state_out_imag) { int n = blockIdx.x * blockDim.x + threadIdx.x; if (n < number_of_atoms) { real temp_real = 0.0; real temp_imag = 0.0; for (int m = 0; m < g_neighbor_number[n]; ++m) { int index_1 = m * number_of_atoms + n; int index_2 = g_neighbor_list[index_1]; real a = g_hopping_real[index_1]; real b = g_hopping_imag[index_1]; real c = g_state_in_real[index_2]; real d = g_state_in_imag[index_2]; real xx = g_xx[index_1]; temp_real -= (a * c - b * d) * xx; temp_imag -= (a * d + b * c) * xx; } g_state_out_real[n] = temp_real / energy_max; // scale g_state_out_imag[n] = temp_imag / energy_max; // scale } } #else void cpu_apply_commutator( int number_of_atoms, int max_neighbor, real energy_max, int* g_neighbor_number, int* g_neighbor_list, real* g_hopping_real, real* g_hopping_imag, real* g_xx, real* g_state_in_real, real* g_state_in_imag, real* g_state_out_real, real* g_state_out_imag) { for (int n = 0; n < number_of_atoms; ++n) { real temp_real = 0.0; real temp_imag = 0.0; for (int m = 0; m < g_neighbor_number[n]; ++m) { int index_1 = n * max_neighbor + m; int index_2 = g_neighbor_list[index_1]; real a = g_hopping_real[index_1]; real b = g_hopping_imag[index_1]; real c = g_state_in_real[index_2]; real d = g_state_in_imag[index_2]; real xx = g_xx[index_1]; temp_real -= (a * c - b * d) * xx; temp_imag -= (a * d + b * c) * xx; } g_state_out_real[n] = temp_real / energy_max; // scale g_state_out_imag[n] = temp_imag / energy_max; // scale } } #endif // |output> = [X, H] |input> void Hamiltonian::apply_commutator(Vector& input, Vector& output) { #ifndef CPU_ONLY hipLaunchKernelGGL(gpu_apply_commutator, dim3(grid_size), dim3(BLOCK_SIZE), 0, 0, n, energy_max, neighbor_number, neighbor_list, hopping_real, hopping_imag, xx, input.real_part, input.imag_part, output.real_part, output.imag_part); CHECK(hipGetLastError()); #else cpu_apply_commutator( n, max_neighbor, energy_max, neighbor_number, neighbor_list, hopping_real, hopping_imag, xx, input.real_part, input.imag_part, output.real_part, output.imag_part); #endif } #ifndef CPU_ONLY __global__ void gpu_apply_current( const int number_of_atoms, const int* __restrict__ g_neighbor_number, const int* __restrict__ g_neighbor_list, const real* __restrict__ g_hopping_real, const real* __restrict__ g_hopping_imag, const real* __restrict__ g_xx, const real* __restrict__ g_state_in_real, const real* __restrict__ g_state_in_imag, real* __restrict__ g_state_out_real, real* __restrict__ g_state_out_imag) { int n = blockIdx.x * blockDim.x + threadIdx.x; if (n < number_of_atoms) { real temp_real = 0.0; real temp_imag = 0.0; for (int m = 0; m < g_neighbor_number[n]; ++m) { int index_1 = m * number_of_atoms + n; int index_2 = g_neighbor_list[index_1]; real a = g_hopping_real[index_1]; real b = g_hopping_imag[index_1]; real c = g_state_in_real[index_2]; real d = g_state_in_imag[index_2]; temp_real += (a * c - b * d) * g_xx[index_1]; temp_imag += (a * d + b * c) * g_xx[index_1]; } g_state_out_real[n] = +temp_imag; g_state_out_imag[n] = -temp_real; } } 
#else void cpu_apply_current( int number_of_atoms, int max_neighbor, int* g_neighbor_number, int* g_neighbor_list, real* g_hopping_real, real* g_hopping_imag, real* g_xx, real* g_state_in_real, real* g_state_in_imag, real* g_state_out_real, real* g_state_out_imag) { for (int n = 0; n < number_of_atoms; ++n) { real temp_real = 0.0; real temp_imag = 0.0; for (int m = 0; m < g_neighbor_number[n]; ++m) { int index_1 = n * max_neighbor + m; int index_2 = g_neighbor_list[index_1]; real a = g_hopping_real[index_1]; real b = g_hopping_imag[index_1]; real c = g_state_in_real[index_2]; real d = g_state_in_imag[index_2]; temp_real += (a * c - b * d) * g_xx[index_1]; temp_imag += (a * d + b * c) * g_xx[index_1]; } g_state_out_real[n] = +temp_imag; g_state_out_imag[n] = -temp_real; } } #endif // |output> = V |input> void Hamiltonian::apply_current(Vector& input, Vector& output) { #ifndef CPU_ONLY hipLaunchKernelGGL(gpu_apply_current, dim3(grid_size), dim3(BLOCK_SIZE), 0, 0, n, neighbor_number, neighbor_list, hopping_real, hopping_imag, xx, input.real_part, input.imag_part, output.real_part, output.imag_part); CHECK(hipGetLastError()); #else cpu_apply_current( n, max_neighbor, neighbor_number, neighbor_list, hopping_real, hopping_imag, xx, input.real_part, input.imag_part, output.real_part, output.imag_part); #endif } // Kernel which calculates the two first terms of time evolution as described by // Eq. (36) in [Comput. Phys. Commun.185, 28 (2014)]. #ifndef CPU_ONLY __global__ void gpu_chebyshev_01( const int number_of_atoms, const real* __restrict__ g_state_0_real, const real* __restrict__ g_state_0_imag, const real* __restrict__ g_state_1_real, const real* __restrict__ g_state_1_imag, real* __restrict__ g_state_real, real* __restrict__ g_state_imag, const real b0, const real b1, const int direction) { int n = blockIdx.x * blockDim.x + threadIdx.x; if (n < number_of_atoms) { real bessel_0 = b0; real bessel_1 = b1 * direction; g_state_real[n] = bessel_0 * g_state_0_real[n] + bessel_1 * g_state_1_imag[n]; g_state_imag[n] = bessel_0 * g_state_0_imag[n] - bessel_1 * g_state_1_real[n]; } } #else void cpu_chebyshev_01( int number_of_atoms, real* g_state_0_real, real* g_state_0_imag, real* g_state_1_real, real* g_state_1_imag, real* g_state_real, real* g_state_imag, real b0, real b1, int direction) { for (int n = 0; n < number_of_atoms; ++n) { real bessel_0 = b0; real bessel_1 = b1 * direction; g_state_real[n] = bessel_0 * g_state_0_real[n] + bessel_1 * g_state_1_imag[n]; g_state_imag[n] = bessel_0 * g_state_0_imag[n] - bessel_1 * g_state_1_real[n]; } } #endif // Wrapper for the kernel above void Hamiltonian::chebyshev_01( Vector& state_0, Vector& state_1, Vector& state, real bessel_0, real bessel_1, int direction) { #ifndef CPU_ONLY hipLaunchKernelGGL(gpu_chebyshev_01, dim3(grid_size), dim3(BLOCK_SIZE), 0, 0, n, state_0.real_part, state_0.imag_part, state_1.real_part, state_1.imag_part, state.real_part, state.imag_part, bessel_0, bessel_1, direction); CHECK(hipGetLastError()); #else cpu_chebyshev_01( n, state_0.real_part, state_0.imag_part, state_1.real_part, state_1.imag_part, state.real_part, state.imag_part, bessel_0, bessel_1, direction); #endif } // Kernel for calculating further terms of Eq. (36) // in [Comput. Phys. Commun.185, 28 (2014)]. 
#ifndef CPU_ONLY __global__ void gpu_chebyshev_2( const int number_of_atoms, const real energy_max, const int* __restrict__ g_neighbor_number, const int* __restrict__ g_neighbor_list, const real* __restrict__ g_potential, const real* __restrict__ g_hopping_real, const real* __restrict__ g_hopping_imag, const real* __restrict__ g_state_0_real, const real* __restrict__ g_state_0_imag, const real* __restrict__ g_state_1_real, const real* __restrict__ g_state_1_imag, real* __restrict__ g_state_2_real, real* __restrict__ g_state_2_imag, real* __restrict__ g_state_real, real* __restrict__ g_state_imag, const real bessel_m, const int label) { int n = blockIdx.x * blockDim.x + threadIdx.x; if (n < number_of_atoms) { real temp_real = g_potential[n] * g_state_1_real[n]; // on-site real temp_imag = g_potential[n] * g_state_1_imag[n]; // on-site for (int m = 0; m < g_neighbor_number[n]; ++m) { int index_1 = m * number_of_atoms + n; int index_2 = g_neighbor_list[index_1]; real a = g_hopping_real[index_1]; real b = g_hopping_imag[index_1]; real c = g_state_1_real[index_2]; real d = g_state_1_imag[index_2]; temp_real += a * c - b * d; // hopping temp_imag += a * d + b * c; // hopping } temp_real /= energy_max; // scale temp_imag /= energy_max; // scale temp_real = 2.0 * temp_real - g_state_0_real[n]; temp_imag = 2.0 * temp_imag - g_state_0_imag[n]; switch (label) { case 1: { g_state_real[n] += bessel_m * temp_real; g_state_imag[n] += bessel_m * temp_imag; break; } case 2: { g_state_real[n] -= bessel_m * temp_real; g_state_imag[n] -= bessel_m * temp_imag; break; } case 3: { g_state_real[n] += bessel_m * temp_imag; g_state_imag[n] -= bessel_m * temp_real; break; } case 4: { g_state_real[n] -= bessel_m * temp_imag; g_state_imag[n] += bessel_m * temp_real; break; } } g_state_2_real[n] = temp_real; g_state_2_imag[n] = temp_imag; } } #else void cpu_chebyshev_2( int number_of_atoms, int max_neighbor, real energy_max, int* g_neighbor_number, int* g_neighbor_list, real* g_potential, real* g_hopping_real, real* g_hopping_imag, real* g_state_0_real, real* g_state_0_imag, real* g_state_1_real, real* g_state_1_imag, real* g_state_2_real, real* g_state_2_imag, real* g_state_real, real* g_state_imag, real bessel_m, int label) { for (int n = 0; n < number_of_atoms; ++n) { real temp_real = g_potential[n] * g_state_1_real[n]; // on-site real temp_imag = g_potential[n] * g_state_1_imag[n]; // on-site for (int m = 0; m < g_neighbor_number[n]; ++m) { int index_1 = n * max_neighbor + m; int index_2 = g_neighbor_list[index_1]; real a = g_hopping_real[index_1]; real b = g_hopping_imag[index_1]; real c = g_state_1_real[index_2]; real d = g_state_1_imag[index_2]; temp_real += a * c - b * d; // hopping temp_imag += a * d + b * c; // hopping } temp_real /= energy_max; // scale temp_imag /= energy_max; // scale temp_real = 2.0 * temp_real - g_state_0_real[n]; temp_imag = 2.0 * temp_imag - g_state_0_imag[n]; switch (label) { case 1: { g_state_real[n] += bessel_m * temp_real; g_state_imag[n] += bessel_m * temp_imag; break; } case 2: { g_state_real[n] -= bessel_m * temp_real; g_state_imag[n] -= bessel_m * temp_imag; break; } case 3: { g_state_real[n] += bessel_m * temp_imag; g_state_imag[n] -= bessel_m * temp_real; break; } case 4: { g_state_real[n] -= bessel_m * temp_imag; g_state_imag[n] += bessel_m * temp_real; break; } } g_state_2_real[n] = temp_real; g_state_2_imag[n] = temp_imag; } } #endif // Wrapper for the kernel above void Hamiltonian::chebyshev_2( Vector& state_0, Vector& state_1, Vector& state_2, Vector& state, real 
bessel_m, int label) { #ifndef CPU_ONLY hipLaunchKernelGGL(gpu_chebyshev_2, dim3(grid_size), dim3(BLOCK_SIZE), 0, 0, n, energy_max, neighbor_number, neighbor_list, potential, hopping_real, hopping_imag, state_0.real_part, state_0.imag_part, state_1.real_part, state_1.imag_part, state_2.real_part, state_2.imag_part, state.real_part, state.imag_part, bessel_m, label); CHECK(hipGetLastError()); #else cpu_chebyshev_2( n, max_neighbor, energy_max, neighbor_number, neighbor_list, potential, hopping_real, hopping_imag, state_0.real_part, state_0.imag_part, state_1.real_part, state_1.imag_part, state_2.real_part, state_2.imag_part, state.real_part, state.imag_part, bessel_m, label); #endif } // Kernel which calculates the two first terms of commutator [X, U(dt)] // Corresponds to Eq. (37) in [Comput. Phys. Commun.185, 28 (2014)]. #ifndef CPU_ONLY __global__ void gpu_chebyshev_1x( const int number_of_atoms, const real* __restrict__ g_state_1x_real, const real* __restrict__ g_state_1x_imag, real* __restrict__ g_state_real, real* __restrict__ g_state_imag, const real g_bessel_1) { int n = blockIdx.x * blockDim.x + threadIdx.x; if (n < number_of_atoms) { real b1 = g_bessel_1; g_state_real[n] = +b1 * g_state_1x_imag[n]; g_state_imag[n] = -b1 * g_state_1x_real[n]; } } #else void cpu_chebyshev_1x( int number_of_atoms, real* g_state_1x_real, real* g_state_1x_imag, real* g_state_real, real* g_state_imag, real g_bessel_1) { for (int n = 0; n < number_of_atoms; ++n) { real b1 = g_bessel_1; g_state_real[n] = +b1 * g_state_1x_imag[n]; g_state_imag[n] = -b1 * g_state_1x_real[n]; } } #endif // Wrapper for kernel above void Hamiltonian::chebyshev_1x(Vector& input, Vector& output, real bessel_1) { #ifndef CPU_ONLY hipLaunchKernelGGL(gpu_chebyshev_1x, dim3(grid_size), dim3(BLOCK_SIZE), 0, 0, n, input.real_part, input.imag_part, output.real_part, output.imag_part, bessel_1); CHECK(hipGetLastError()); #else cpu_chebyshev_1x( n, input.real_part, input.imag_part, output.real_part, output.imag_part, bessel_1); #endif } // Kernel which calculates the further terms of [X, U(dt)] #ifndef CPU_ONLY __global__ void gpu_chebyshev_2x( const int number_of_atoms, const real energy_max, const int* __restrict__ g_neighbor_number, const int* __restrict__ g_neighbor_list, const real* __restrict__ g_potential, const real* __restrict__ g_hopping_real, const real* __restrict__ g_hopping_imag, const real* __restrict__ g_xx, const real* __restrict__ g_state_0_real, const real* __restrict__ g_state_0_imag, const real* __restrict__ g_state_0x_real, const real* __restrict__ g_state_0x_imag, const real* __restrict__ g_state_1_real, const real* __restrict__ g_state_1_imag, const real* __restrict__ g_state_1x_real, const real* __restrict__ g_state_1x_imag, real* __restrict__ g_state_2_real, real* __restrict__ g_state_2_imag, real* __restrict__ g_state_2x_real, real* __restrict__ g_state_2x_imag, real* __restrict__ g_state_real, real* __restrict__ g_state_imag, const real g_bessel_m, const int g_label) { int n = blockIdx.x * blockDim.x + threadIdx.x; if (n < number_of_atoms) { real temp_real = g_potential[n] * g_state_1_real[n]; // on-site real temp_imag = g_potential[n] * g_state_1_imag[n]; // on-site real temp_x_real = g_potential[n] * g_state_1x_real[n]; // on-site real temp_x_imag = g_potential[n] * g_state_1x_imag[n]; // on-site for (int m = 0; m < g_neighbor_number[n]; ++m) { int index_1 = m * number_of_atoms + n; int index_2 = g_neighbor_list[index_1]; real a = g_hopping_real[index_1]; real b = g_hopping_imag[index_1]; real c = 
g_state_1_real[index_2]; real d = g_state_1_imag[index_2]; temp_real += a * c - b * d; // hopping temp_imag += a * d + b * c; // hopping real cx = g_state_1x_real[index_2]; real dx = g_state_1x_imag[index_2]; temp_x_real += a * cx - b * dx; // hopping temp_x_imag += a * dx + b * cx; // hopping real xx = g_xx[index_1]; temp_x_real -= (a * c - b * d) * xx; // hopping temp_x_imag -= (a * d + b * c) * xx; // hopping } temp_real /= energy_max; // scale temp_imag /= energy_max; // scale temp_real = 2.0 * temp_real - g_state_0_real[n]; temp_imag = 2.0 * temp_imag - g_state_0_imag[n]; g_state_2_real[n] = temp_real; g_state_2_imag[n] = temp_imag; temp_x_real /= energy_max; // scale temp_x_imag /= energy_max; // scale temp_x_real = 2.0 * temp_x_real - g_state_0x_real[n]; temp_x_imag = 2.0 * temp_x_imag - g_state_0x_imag[n]; g_state_2x_real[n] = temp_x_real; g_state_2x_imag[n] = temp_x_imag; real bessel_m = g_bessel_m; switch (g_label) { case 1: { g_state_real[n] += bessel_m * temp_x_real; g_state_imag[n] += bessel_m * temp_x_imag; break; } case 2: { g_state_real[n] -= bessel_m * temp_x_real; g_state_imag[n] -= bessel_m * temp_x_imag; break; } case 3: { g_state_real[n] += bessel_m * temp_x_imag; g_state_imag[n] -= bessel_m * temp_x_real; break; } case 4: { g_state_real[n] -= bessel_m * temp_x_imag; g_state_imag[n] += bessel_m * temp_x_real; break; } } } } #else void cpu_chebyshev_2x( int number_of_atoms, int max_neighbor, real energy_max, int* g_neighbor_number, int* g_neighbor_list, real* g_potential, real* g_hopping_real, real* g_hopping_imag, real* g_xx, real* g_state_0_real, real* g_state_0_imag, real* g_state_0x_real, real* g_state_0x_imag, real* g_state_1_real, real* g_state_1_imag, real* g_state_1x_real, real* g_state_1x_imag, real* g_state_2_real, real* g_state_2_imag, real* g_state_2x_real, real* g_state_2x_imag, real* g_state_real, real* g_state_imag, real g_bessel_m, int g_label) { for (int n = 0; n < number_of_atoms; ++n) { real temp_real = g_potential[n] * g_state_1_real[n]; // on-site real temp_imag = g_potential[n] * g_state_1_imag[n]; // on-site real temp_x_real = g_potential[n] * g_state_1x_real[n]; // on-site real temp_x_imag = g_potential[n] * g_state_1x_imag[n]; // on-site for (int m = 0; m < g_neighbor_number[n]; ++m) { int index_1 = n * max_neighbor + m; int index_2 = g_neighbor_list[index_1]; real a = g_hopping_real[index_1]; real b = g_hopping_imag[index_1]; real c = g_state_1_real[index_2]; real d = g_state_1_imag[index_2]; temp_real += a * c - b * d; // hopping temp_imag += a * d + b * c; // hopping real cx = g_state_1x_real[index_2]; real dx = g_state_1x_imag[index_2]; temp_x_real += a * cx - b * dx; // hopping temp_x_imag += a * dx + b * cx; // hopping real xx = g_xx[index_1]; temp_x_real -= (a * c - b * d) * xx; // hopping temp_x_imag -= (a * d + b * c) * xx; // hopping } temp_real /= energy_max; // scale temp_imag /= energy_max; // scale temp_real = 2.0 * temp_real - g_state_0_real[n]; temp_imag = 2.0 * temp_imag - g_state_0_imag[n]; g_state_2_real[n] = temp_real; g_state_2_imag[n] = temp_imag; temp_x_real /= energy_max; // scale temp_x_imag /= energy_max; // scale temp_x_real = 2.0 * temp_x_real - g_state_0x_real[n]; temp_x_imag = 2.0 * temp_x_imag - g_state_0x_imag[n]; g_state_2x_real[n] = temp_x_real; g_state_2x_imag[n] = temp_x_imag; real bessel_m = g_bessel_m; switch (g_label) { case 1: { g_state_real[n] += bessel_m * temp_x_real; g_state_imag[n] += bessel_m * temp_x_imag; break; } case 2: { g_state_real[n] -= bessel_m * temp_x_real; g_state_imag[n] -= bessel_m * 
temp_x_imag; break; } case 3: { g_state_real[n] += bessel_m * temp_x_imag; g_state_imag[n] -= bessel_m * temp_x_real; break; } case 4: { g_state_real[n] -= bessel_m * temp_x_imag; g_state_imag[n] += bessel_m * temp_x_real; break; } } } } #endif // Wrapper for the kernel above void Hamiltonian::chebyshev_2x( Vector& state_0, Vector& state_0x, Vector& state_1, Vector& state_1x, Vector& state_2, Vector& state_2x, Vector& state, real bessel_m, int label) { #ifndef CPU_ONLY hipLaunchKernelGGL(gpu_chebyshev_2x, dim3(grid_size), dim3(BLOCK_SIZE), 0, 0, n, energy_max, neighbor_number, neighbor_list, potential, hopping_real, hopping_imag, xx, state_0.real_part, state_0.imag_part, state_0x.real_part, state_0x.imag_part, state_1.real_part, state_1.imag_part, state_1x.real_part, state_1x.imag_part, state_2.real_part, state_2.imag_part, state_2x.real_part, state_2x.imag_part, state.real_part, state.imag_part, bessel_m, label); CHECK(hipGetLastError()); #else cpu_chebyshev_2x( n, max_neighbor, energy_max, neighbor_number, neighbor_list, potential, hopping_real, hopping_imag, xx, state_0.real_part, state_0.imag_part, state_0x.real_part, state_0x.imag_part, state_1.real_part, state_1.imag_part, state_1x.real_part, state_1x.imag_part, state_2.real_part, state_2.imag_part, state_2x.real_part, state_2x.imag_part, state.real_part, state.imag_part, bessel_m, label); #endif } // Kernel for doing the Chebyshev iteration phi_2 = 2 * H * phi_1 - phi_0. #ifndef CPU_ONLY __global__ void gpu_kernel_polynomial( const int number_of_atoms, const real energy_max, const int* __restrict__ g_neighbor_number, const int* __restrict__ g_neighbor_list, const real* __restrict__ g_potential, const real* __restrict__ g_hopping_real, const real* __restrict__ g_hopping_imag, const real* __restrict__ g_state_0_real, const real* __restrict__ g_state_0_imag, const real* __restrict__ g_state_1_real, const real* __restrict__ g_state_1_imag, real* __restrict__ g_state_2_real, real* __restrict__ g_state_2_imag) { int n = blockIdx.x * blockDim.x + threadIdx.x; if (n < number_of_atoms) { real temp_real = g_potential[n] * g_state_1_real[n]; // on-site real temp_imag = g_potential[n] * g_state_1_imag[n]; // on-site for (int m = 0; m < g_neighbor_number[n]; ++m) { int index_1 = m * number_of_atoms + n; int index_2 = g_neighbor_list[index_1]; real a = g_hopping_real[index_1]; real b = g_hopping_imag[index_1]; real c = g_state_1_real[index_2]; real d = g_state_1_imag[index_2]; temp_real += a * c - b * d; // hopping temp_imag += a * d + b * c; // hopping } temp_real /= energy_max; // scale temp_imag /= energy_max; // scale temp_real = 2.0 * temp_real - g_state_0_real[n]; temp_imag = 2.0 * temp_imag - g_state_0_imag[n]; g_state_2_real[n] = temp_real; g_state_2_imag[n] = temp_imag; } } #else void cpu_kernel_polynomial( int number_of_atoms, int max_neighbor, real energy_max, int* g_neighbor_number, int* g_neighbor_list, real* g_potential, real* g_hopping_real, real* g_hopping_imag, real* g_state_0_real, real* g_state_0_imag, real* g_state_1_real, real* g_state_1_imag, real* g_state_2_real, real* g_state_2_imag) { for (int n = 0; n < number_of_atoms; ++n) { real temp_real = g_potential[n] * g_state_1_real[n]; // on-site real temp_imag = g_potential[n] * g_state_1_imag[n]; // on-site for (int m = 0; m < g_neighbor_number[n]; ++m) { int index_1 = n * max_neighbor + m; int index_2 = g_neighbor_list[index_1]; real a = g_hopping_real[index_1]; real b = g_hopping_imag[index_1]; real c = g_state_1_real[index_2]; real d = g_state_1_imag[index_2]; temp_real += 
a * c - b * d; // hopping temp_imag += a * d + b * c; // hopping } temp_real /= energy_max; // scale temp_imag /= energy_max; // scale temp_real = 2.0 * temp_real - g_state_0_real[n]; temp_imag = 2.0 * temp_imag - g_state_0_imag[n]; g_state_2_real[n] = temp_real; g_state_2_imag[n] = temp_imag; } } #endif // Wrapper for the Chebyshev iteration void Hamiltonian::kernel_polynomial(Vector& state_0, Vector& state_1, Vector& state_2) { #ifndef CPU_ONLY hipLaunchKernelGGL(gpu_kernel_polynomial, dim3(grid_size), dim3(BLOCK_SIZE), 0, 0, n, energy_max, neighbor_number, neighbor_list, potential, hopping_real, hopping_imag, state_0.real_part, state_0.imag_part, state_1.real_part, state_1.imag_part, state_2.real_part, state_2.imag_part); CHECK(hipGetLastError()); #else cpu_kernel_polynomial( n, max_neighbor, energy_max, neighbor_number, neighbor_list, potential, hopping_real, hopping_imag, state_0.real_part, state_0.imag_part, state_1.real_part, state_1.imag_part, state_2.real_part, state_2.imag_part); #endif }
5f15c85cb3d16b41fe2f056f32bf8296290ecc12.cu
/* Copyright 2017 Zheyong Fan, Ville Vierimaa, and Ari Harju This file is part of GPUQT. GPUQT is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. GPUQT is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GPUQT. If not, see <http://www.gnu.org/licenses/>. */ #include "hamiltonian.h" #include "model.h" #include "vector.h" #include <string.h> // memcpy #define BLOCK_SIZE 256 #ifndef CPU_ONLY void Hamiltonian::initialize_gpu(Model& model) { n = model.number_of_atoms; max_neighbor = model.max_neighbor; energy_max = model.energy_max; grid_size = (model.number_of_atoms - 1) / BLOCK_SIZE + 1; CHECK(hipMalloc((void**)&neighbor_number, sizeof(int) * n)); CHECK(hipMalloc((void**)&neighbor_list, sizeof(int) * model.number_of_pairs)); CHECK(hipMalloc((void**)&potential, sizeof(real) * n)); CHECK(hipMalloc((void**)&hopping_real, sizeof(real) * model.number_of_pairs)); CHECK(hipMalloc((void**)&hopping_imag, sizeof(real) * model.number_of_pairs)); CHECK(hipMalloc((void**)&xx, sizeof(real) * model.number_of_pairs)); CHECK( hipMemcpy(neighbor_number, model.neighbor_number, sizeof(int) * n, hipMemcpyHostToDevice)); delete[] model.neighbor_number; CHECK(hipMemcpy(potential, model.potential, sizeof(real) * n, hipMemcpyHostToDevice)); delete[] model.potential; int* neighbor_list_new = new int[model.number_of_pairs]; for (int m = 0; m < max_neighbor; ++m) { for (int i = 0; i < n; ++i) { neighbor_list_new[m * n + i] = model.neighbor_list[i * max_neighbor + m]; } } delete[] model.neighbor_list; CHECK(hipMemcpy( neighbor_list, neighbor_list_new, sizeof(int) * model.number_of_pairs, hipMemcpyHostToDevice)); delete[] neighbor_list_new; real* hopping_real_new = new real[model.number_of_pairs]; for (int m = 0; m < max_neighbor; ++m) { for (int i = 0; i < n; ++i) { hopping_real_new[m * n + i] = model.hopping_real[i * max_neighbor + m]; } } delete[] model.hopping_real; CHECK(hipMemcpy( hopping_real, hopping_real_new, sizeof(real) * model.number_of_pairs, hipMemcpyHostToDevice)); delete[] hopping_real_new; real* hopping_imag_new = new real[model.number_of_pairs]; for (int m = 0; m < max_neighbor; ++m) { for (int i = 0; i < n; ++i) { hopping_imag_new[m * n + i] = model.hopping_imag[i * max_neighbor + m]; } } delete[] model.hopping_imag; CHECK(hipMemcpy( hopping_imag, hopping_imag_new, sizeof(real) * model.number_of_pairs, hipMemcpyHostToDevice)); delete[] hopping_imag_new; real* xx_new = new real[model.number_of_pairs]; for (int m = 0; m < max_neighbor; ++m) { for (int i = 0; i < n; ++i) { xx_new[m * n + i] = model.xx[i * max_neighbor + m]; } } delete[] model.xx; CHECK(hipMemcpy(xx, xx_new, sizeof(real) * model.number_of_pairs, hipMemcpyHostToDevice)); delete[] xx_new; } #else void Hamiltonian::initialize_cpu(Model& model) { n = model.number_of_atoms; max_neighbor = model.max_neighbor; energy_max = model.energy_max; int number_of_pairs = model.number_of_pairs; neighbor_number = new int[n]; memcpy(neighbor_number, model.neighbor_number, sizeof(int) * n); delete[] model.neighbor_number; neighbor_list = new int[number_of_pairs]; memcpy(neighbor_list, model.neighbor_list, sizeof(int) * number_of_pairs); delete[] 
model.neighbor_list; potential = new real[n]; memcpy(potential, model.potential, sizeof(real) * n); delete[] model.potential; hopping_real = new real[number_of_pairs]; memcpy(hopping_real, model.hopping_real, sizeof(real) * number_of_pairs); delete[] model.hopping_real; hopping_imag = new real[number_of_pairs]; memcpy(hopping_imag, model.hopping_imag, sizeof(real) * number_of_pairs); delete[] model.hopping_imag; xx = new real[number_of_pairs]; memcpy(xx, model.xx, sizeof(real) * number_of_pairs); delete[] model.xx; } #endif Hamiltonian::Hamiltonian(Model& model) { #ifndef CPU_ONLY initialize_gpu(model); #else initialize_cpu(model); #endif } Hamiltonian::~Hamiltonian() { #ifndef CPU_ONLY CHECK(hipFree(neighbor_number)); CHECK(hipFree(neighbor_list)); CHECK(hipFree(potential)); CHECK(hipFree(hopping_real)); CHECK(hipFree(hopping_imag)); CHECK(hipFree(xx)); #else delete[] neighbor_number; delete[] neighbor_list; delete[] potential; delete[] hopping_real; delete[] hopping_imag; delete[] xx; #endif } #ifndef CPU_ONLY __global__ void gpu_apply_hamiltonian( const int number_of_atoms, const real energy_max, const int* __restrict__ g_neighbor_number, const int* __restrict__ g_neighbor_list, const real* __restrict__ g_potential, const real* __restrict__ g_hopping_real, const real* __restrict__ g_hopping_imag, const real* __restrict__ g_state_in_real, const real* __restrict__ g_state_in_imag, real* __restrict__ g_state_out_real, real* __restrict__ g_state_out_imag) { int n = blockIdx.x * blockDim.x + threadIdx.x; if (n < number_of_atoms) { real temp_real = g_potential[n] * g_state_in_real[n]; // on-site real temp_imag = g_potential[n] * g_state_in_imag[n]; // on-site for (int m = 0; m < g_neighbor_number[n]; ++m) { int index_1 = m * number_of_atoms + n; int index_2 = g_neighbor_list[index_1]; real a = g_hopping_real[index_1]; real b = g_hopping_imag[index_1]; real c = g_state_in_real[index_2]; real d = g_state_in_imag[index_2]; temp_real += a * c - b * d; // hopping temp_imag += a * d + b * c; // hopping } temp_real /= energy_max; // scale temp_imag /= energy_max; // scale g_state_out_real[n] = temp_real; g_state_out_imag[n] = temp_imag; } } #else void cpu_apply_hamiltonian( int number_of_atoms, int max_neighbor, real energy_max, int* g_neighbor_number, int* g_neighbor_list, real* g_potential, real* g_hopping_real, real* g_hopping_imag, real* g_state_in_real, real* g_state_in_imag, real* g_state_out_real, real* g_state_out_imag) { for (int n = 0; n < number_of_atoms; ++n) { real temp_real = g_potential[n] * g_state_in_real[n]; // on-site real temp_imag = g_potential[n] * g_state_in_imag[n]; // on-site for (int m = 0; m < g_neighbor_number[n]; ++m) { int index_1 = n * max_neighbor + m; int index_2 = g_neighbor_list[index_1]; real a = g_hopping_real[index_1]; real b = g_hopping_imag[index_1]; real c = g_state_in_real[index_2]; real d = g_state_in_imag[index_2]; temp_real += a * c - b * d; // hopping temp_imag += a * d + b * c; // hopping } temp_real /= energy_max; // scale temp_imag /= energy_max; // scale g_state_out_real[n] = temp_real; g_state_out_imag[n] = temp_imag; } } #endif // |output> = H |input> void Hamiltonian::apply(Vector& input, Vector& output) { #ifndef CPU_ONLY hipLaunchKernelGGL(gpu_apply_hamiltonian, dim3(grid_size), dim3(BLOCK_SIZE), 0, 0, n, energy_max, neighbor_number, neighbor_list, potential, hopping_real, hopping_imag, input.real_part, input.imag_part, output.real_part, output.imag_part); CHECK(hipGetLastError()); #else cpu_apply_hamiltonian( n, max_neighbor, energy_max, 
neighbor_number, neighbor_list, potential, hopping_real, hopping_imag, input.real_part, input.imag_part, output.real_part, output.imag_part); #endif } #ifndef CPU_ONLY __global__ void gpu_apply_commutator( int number_of_atoms, real energy_max, int* g_neighbor_number, int* g_neighbor_list, real* g_hopping_real, real* g_hopping_imag, real* g_xx, real* g_state_in_real, real* g_state_in_imag, real* g_state_out_real, real* g_state_out_imag) { int n = blockIdx.x * blockDim.x + threadIdx.x; if (n < number_of_atoms) { real temp_real = 0.0; real temp_imag = 0.0; for (int m = 0; m < g_neighbor_number[n]; ++m) { int index_1 = m * number_of_atoms + n; int index_2 = g_neighbor_list[index_1]; real a = g_hopping_real[index_1]; real b = g_hopping_imag[index_1]; real c = g_state_in_real[index_2]; real d = g_state_in_imag[index_2]; real xx = g_xx[index_1]; temp_real -= (a * c - b * d) * xx; temp_imag -= (a * d + b * c) * xx; } g_state_out_real[n] = temp_real / energy_max; // scale g_state_out_imag[n] = temp_imag / energy_max; // scale } } #else void cpu_apply_commutator( int number_of_atoms, int max_neighbor, real energy_max, int* g_neighbor_number, int* g_neighbor_list, real* g_hopping_real, real* g_hopping_imag, real* g_xx, real* g_state_in_real, real* g_state_in_imag, real* g_state_out_real, real* g_state_out_imag) { for (int n = 0; n < number_of_atoms; ++n) { real temp_real = 0.0; real temp_imag = 0.0; for (int m = 0; m < g_neighbor_number[n]; ++m) { int index_1 = n * max_neighbor + m; int index_2 = g_neighbor_list[index_1]; real a = g_hopping_real[index_1]; real b = g_hopping_imag[index_1]; real c = g_state_in_real[index_2]; real d = g_state_in_imag[index_2]; real xx = g_xx[index_1]; temp_real -= (a * c - b * d) * xx; temp_imag -= (a * d + b * c) * xx; } g_state_out_real[n] = temp_real / energy_max; // scale g_state_out_imag[n] = temp_imag / energy_max; // scale } } #endif // |output> = [X, H] |input> void Hamiltonian::apply_commutator(Vector& input, Vector& output) { #ifndef CPU_ONLY hipLaunchKernelGGL(gpu_apply_commutator, dim3(grid_size), dim3(BLOCK_SIZE), 0, 0, n, energy_max, neighbor_number, neighbor_list, hopping_real, hopping_imag, xx, input.real_part, input.imag_part, output.real_part, output.imag_part); CHECK(hipGetLastError()); #else cpu_apply_commutator( n, max_neighbor, energy_max, neighbor_number, neighbor_list, hopping_real, hopping_imag, xx, input.real_part, input.imag_part, output.real_part, output.imag_part); #endif } #ifndef CPU_ONLY __global__ void gpu_apply_current( const int number_of_atoms, const int* __restrict__ g_neighbor_number, const int* __restrict__ g_neighbor_list, const real* __restrict__ g_hopping_real, const real* __restrict__ g_hopping_imag, const real* __restrict__ g_xx, const real* __restrict__ g_state_in_real, const real* __restrict__ g_state_in_imag, real* __restrict__ g_state_out_real, real* __restrict__ g_state_out_imag) { int n = blockIdx.x * blockDim.x + threadIdx.x; if (n < number_of_atoms) { real temp_real = 0.0; real temp_imag = 0.0; for (int m = 0; m < g_neighbor_number[n]; ++m) { int index_1 = m * number_of_atoms + n; int index_2 = g_neighbor_list[index_1]; real a = g_hopping_real[index_1]; real b = g_hopping_imag[index_1]; real c = g_state_in_real[index_2]; real d = g_state_in_imag[index_2]; temp_real += (a * c - b * d) * g_xx[index_1]; temp_imag += (a * d + b * c) * g_xx[index_1]; } g_state_out_real[n] = +temp_imag; g_state_out_imag[n] = -temp_real; } } #else void cpu_apply_current( int number_of_atoms, int max_neighbor, int* g_neighbor_number, int* 
g_neighbor_list, real* g_hopping_real, real* g_hopping_imag, real* g_xx, real* g_state_in_real, real* g_state_in_imag, real* g_state_out_real, real* g_state_out_imag) { for (int n = 0; n < number_of_atoms; ++n) { real temp_real = 0.0; real temp_imag = 0.0; for (int m = 0; m < g_neighbor_number[n]; ++m) { int index_1 = n * max_neighbor + m; int index_2 = g_neighbor_list[index_1]; real a = g_hopping_real[index_1]; real b = g_hopping_imag[index_1]; real c = g_state_in_real[index_2]; real d = g_state_in_imag[index_2]; temp_real += (a * c - b * d) * g_xx[index_1]; temp_imag += (a * d + b * c) * g_xx[index_1]; } g_state_out_real[n] = +temp_imag; g_state_out_imag[n] = -temp_real; } } #endif // |output> = V |input> void Hamiltonian::apply_current(Vector& input, Vector& output) { #ifndef CPU_ONLY hipLaunchKernelGGL(gpu_apply_current, dim3(grid_size), dim3(BLOCK_SIZE), 0, 0, n, neighbor_number, neighbor_list, hopping_real, hopping_imag, xx, input.real_part, input.imag_part, output.real_part, output.imag_part); CHECK(hipGetLastError()); #else cpu_apply_current( n, max_neighbor, neighbor_number, neighbor_list, hopping_real, hopping_imag, xx, input.real_part, input.imag_part, output.real_part, output.imag_part); #endif } // Kernel which calculates the two first terms of time evolution as described by // Eq. (36) in [Comput. Phys. Commun.185, 28 (2014)]. #ifndef CPU_ONLY __global__ void gpu_chebyshev_01( const int number_of_atoms, const real* __restrict__ g_state_0_real, const real* __restrict__ g_state_0_imag, const real* __restrict__ g_state_1_real, const real* __restrict__ g_state_1_imag, real* __restrict__ g_state_real, real* __restrict__ g_state_imag, const real b0, const real b1, const int direction) { int n = blockIdx.x * blockDim.x + threadIdx.x; if (n < number_of_atoms) { real bessel_0 = b0; real bessel_1 = b1 * direction; g_state_real[n] = bessel_0 * g_state_0_real[n] + bessel_1 * g_state_1_imag[n]; g_state_imag[n] = bessel_0 * g_state_0_imag[n] - bessel_1 * g_state_1_real[n]; } } #else void cpu_chebyshev_01( int number_of_atoms, real* g_state_0_real, real* g_state_0_imag, real* g_state_1_real, real* g_state_1_imag, real* g_state_real, real* g_state_imag, real b0, real b1, int direction) { for (int n = 0; n < number_of_atoms; ++n) { real bessel_0 = b0; real bessel_1 = b1 * direction; g_state_real[n] = bessel_0 * g_state_0_real[n] + bessel_1 * g_state_1_imag[n]; g_state_imag[n] = bessel_0 * g_state_0_imag[n] - bessel_1 * g_state_1_real[n]; } } #endif // Wrapper for the kernel above void Hamiltonian::chebyshev_01( Vector& state_0, Vector& state_1, Vector& state, real bessel_0, real bessel_1, int direction) { #ifndef CPU_ONLY hipLaunchKernelGGL(gpu_chebyshev_01, dim3(grid_size), dim3(BLOCK_SIZE), 0, 0, n, state_0.real_part, state_0.imag_part, state_1.real_part, state_1.imag_part, state.real_part, state.imag_part, bessel_0, bessel_1, direction); CHECK(hipGetLastError()); #else cpu_chebyshev_01( n, state_0.real_part, state_0.imag_part, state_1.real_part, state_1.imag_part, state.real_part, state.imag_part, bessel_0, bessel_1, direction); #endif } // Kernel for calculating further terms of Eq. (36) // in [Comput. Phys. Commun.185, 28 (2014)]. 
#ifndef CPU_ONLY __global__ void gpu_chebyshev_2( const int number_of_atoms, const real energy_max, const int* __restrict__ g_neighbor_number, const int* __restrict__ g_neighbor_list, const real* __restrict__ g_potential, const real* __restrict__ g_hopping_real, const real* __restrict__ g_hopping_imag, const real* __restrict__ g_state_0_real, const real* __restrict__ g_state_0_imag, const real* __restrict__ g_state_1_real, const real* __restrict__ g_state_1_imag, real* __restrict__ g_state_2_real, real* __restrict__ g_state_2_imag, real* __restrict__ g_state_real, real* __restrict__ g_state_imag, const real bessel_m, const int label) { int n = blockIdx.x * blockDim.x + threadIdx.x; if (n < number_of_atoms) { real temp_real = g_potential[n] * g_state_1_real[n]; // on-site real temp_imag = g_potential[n] * g_state_1_imag[n]; // on-site for (int m = 0; m < g_neighbor_number[n]; ++m) { int index_1 = m * number_of_atoms + n; int index_2 = g_neighbor_list[index_1]; real a = g_hopping_real[index_1]; real b = g_hopping_imag[index_1]; real c = g_state_1_real[index_2]; real d = g_state_1_imag[index_2]; temp_real += a * c - b * d; // hopping temp_imag += a * d + b * c; // hopping } temp_real /= energy_max; // scale temp_imag /= energy_max; // scale temp_real = 2.0 * temp_real - g_state_0_real[n]; temp_imag = 2.0 * temp_imag - g_state_0_imag[n]; switch (label) { case 1: { g_state_real[n] += bessel_m * temp_real; g_state_imag[n] += bessel_m * temp_imag; break; } case 2: { g_state_real[n] -= bessel_m * temp_real; g_state_imag[n] -= bessel_m * temp_imag; break; } case 3: { g_state_real[n] += bessel_m * temp_imag; g_state_imag[n] -= bessel_m * temp_real; break; } case 4: { g_state_real[n] -= bessel_m * temp_imag; g_state_imag[n] += bessel_m * temp_real; break; } } g_state_2_real[n] = temp_real; g_state_2_imag[n] = temp_imag; } } #else void cpu_chebyshev_2( int number_of_atoms, int max_neighbor, real energy_max, int* g_neighbor_number, int* g_neighbor_list, real* g_potential, real* g_hopping_real, real* g_hopping_imag, real* g_state_0_real, real* g_state_0_imag, real* g_state_1_real, real* g_state_1_imag, real* g_state_2_real, real* g_state_2_imag, real* g_state_real, real* g_state_imag, real bessel_m, int label) { for (int n = 0; n < number_of_atoms; ++n) { real temp_real = g_potential[n] * g_state_1_real[n]; // on-site real temp_imag = g_potential[n] * g_state_1_imag[n]; // on-site for (int m = 0; m < g_neighbor_number[n]; ++m) { int index_1 = n * max_neighbor + m; int index_2 = g_neighbor_list[index_1]; real a = g_hopping_real[index_1]; real b = g_hopping_imag[index_1]; real c = g_state_1_real[index_2]; real d = g_state_1_imag[index_2]; temp_real += a * c - b * d; // hopping temp_imag += a * d + b * c; // hopping } temp_real /= energy_max; // scale temp_imag /= energy_max; // scale temp_real = 2.0 * temp_real - g_state_0_real[n]; temp_imag = 2.0 * temp_imag - g_state_0_imag[n]; switch (label) { case 1: { g_state_real[n] += bessel_m * temp_real; g_state_imag[n] += bessel_m * temp_imag; break; } case 2: { g_state_real[n] -= bessel_m * temp_real; g_state_imag[n] -= bessel_m * temp_imag; break; } case 3: { g_state_real[n] += bessel_m * temp_imag; g_state_imag[n] -= bessel_m * temp_real; break; } case 4: { g_state_real[n] -= bessel_m * temp_imag; g_state_imag[n] += bessel_m * temp_real; break; } } g_state_2_real[n] = temp_real; g_state_2_imag[n] = temp_imag; } } #endif // Wrapper for the kernel above void Hamiltonian::chebyshev_2( Vector& state_0, Vector& state_1, Vector& state_2, Vector& state, real 
bessel_m, int label) { #ifndef CPU_ONLY hipLaunchKernelGGL(gpu_chebyshev_2, dim3(grid_size), dim3(BLOCK_SIZE), 0, 0, n, energy_max, neighbor_number, neighbor_list, potential, hopping_real, hopping_imag, state_0.real_part, state_0.imag_part, state_1.real_part, state_1.imag_part, state_2.real_part, state_2.imag_part, state.real_part, state.imag_part, bessel_m, label); CHECK(hipGetLastError()); #else cpu_chebyshev_2( n, max_neighbor, energy_max, neighbor_number, neighbor_list, potential, hopping_real, hopping_imag, state_0.real_part, state_0.imag_part, state_1.real_part, state_1.imag_part, state_2.real_part, state_2.imag_part, state.real_part, state.imag_part, bessel_m, label); #endif } // Kernel which calculates the two first terms of commutator [X, U(dt)] // Corresponds to Eq. (37) in [Comput. Phys. Commun.185, 28 (2014)]. #ifndef CPU_ONLY __global__ void gpu_chebyshev_1x( const int number_of_atoms, const real* __restrict__ g_state_1x_real, const real* __restrict__ g_state_1x_imag, real* __restrict__ g_state_real, real* __restrict__ g_state_imag, const real g_bessel_1) { int n = blockIdx.x * blockDim.x + threadIdx.x; if (n < number_of_atoms) { real b1 = g_bessel_1; g_state_real[n] = +b1 * g_state_1x_imag[n]; g_state_imag[n] = -b1 * g_state_1x_real[n]; } } #else void cpu_chebyshev_1x( int number_of_atoms, real* g_state_1x_real, real* g_state_1x_imag, real* g_state_real, real* g_state_imag, real g_bessel_1) { for (int n = 0; n < number_of_atoms; ++n) { real b1 = g_bessel_1; g_state_real[n] = +b1 * g_state_1x_imag[n]; g_state_imag[n] = -b1 * g_state_1x_real[n]; } } #endif // Wrapper for kernel above void Hamiltonian::chebyshev_1x(Vector& input, Vector& output, real bessel_1) { #ifndef CPU_ONLY hipLaunchKernelGGL(gpu_chebyshev_1x, dim3(grid_size), dim3(BLOCK_SIZE), 0, 0, n, input.real_part, input.imag_part, output.real_part, output.imag_part, bessel_1); CHECK(hipGetLastError()); #else cpu_chebyshev_1x( n, input.real_part, input.imag_part, output.real_part, output.imag_part, bessel_1); #endif } // Kernel which calculates the further terms of [X, U(dt)] #ifndef CPU_ONLY __global__ void gpu_chebyshev_2x( const int number_of_atoms, const real energy_max, const int* __restrict__ g_neighbor_number, const int* __restrict__ g_neighbor_list, const real* __restrict__ g_potential, const real* __restrict__ g_hopping_real, const real* __restrict__ g_hopping_imag, const real* __restrict__ g_xx, const real* __restrict__ g_state_0_real, const real* __restrict__ g_state_0_imag, const real* __restrict__ g_state_0x_real, const real* __restrict__ g_state_0x_imag, const real* __restrict__ g_state_1_real, const real* __restrict__ g_state_1_imag, const real* __restrict__ g_state_1x_real, const real* __restrict__ g_state_1x_imag, real* __restrict__ g_state_2_real, real* __restrict__ g_state_2_imag, real* __restrict__ g_state_2x_real, real* __restrict__ g_state_2x_imag, real* __restrict__ g_state_real, real* __restrict__ g_state_imag, const real g_bessel_m, const int g_label) { int n = blockIdx.x * blockDim.x + threadIdx.x; if (n < number_of_atoms) { real temp_real = g_potential[n] * g_state_1_real[n]; // on-site real temp_imag = g_potential[n] * g_state_1_imag[n]; // on-site real temp_x_real = g_potential[n] * g_state_1x_real[n]; // on-site real temp_x_imag = g_potential[n] * g_state_1x_imag[n]; // on-site for (int m = 0; m < g_neighbor_number[n]; ++m) { int index_1 = m * number_of_atoms + n; int index_2 = g_neighbor_list[index_1]; real a = g_hopping_real[index_1]; real b = g_hopping_imag[index_1]; real c = 
g_state_1_real[index_2]; real d = g_state_1_imag[index_2]; temp_real += a * c - b * d; // hopping temp_imag += a * d + b * c; // hopping real cx = g_state_1x_real[index_2]; real dx = g_state_1x_imag[index_2]; temp_x_real += a * cx - b * dx; // hopping temp_x_imag += a * dx + b * cx; // hopping real xx = g_xx[index_1]; temp_x_real -= (a * c - b * d) * xx; // hopping temp_x_imag -= (a * d + b * c) * xx; // hopping } temp_real /= energy_max; // scale temp_imag /= energy_max; // scale temp_real = 2.0 * temp_real - g_state_0_real[n]; temp_imag = 2.0 * temp_imag - g_state_0_imag[n]; g_state_2_real[n] = temp_real; g_state_2_imag[n] = temp_imag; temp_x_real /= energy_max; // scale temp_x_imag /= energy_max; // scale temp_x_real = 2.0 * temp_x_real - g_state_0x_real[n]; temp_x_imag = 2.0 * temp_x_imag - g_state_0x_imag[n]; g_state_2x_real[n] = temp_x_real; g_state_2x_imag[n] = temp_x_imag; real bessel_m = g_bessel_m; switch (g_label) { case 1: { g_state_real[n] += bessel_m * temp_x_real; g_state_imag[n] += bessel_m * temp_x_imag; break; } case 2: { g_state_real[n] -= bessel_m * temp_x_real; g_state_imag[n] -= bessel_m * temp_x_imag; break; } case 3: { g_state_real[n] += bessel_m * temp_x_imag; g_state_imag[n] -= bessel_m * temp_x_real; break; } case 4: { g_state_real[n] -= bessel_m * temp_x_imag; g_state_imag[n] += bessel_m * temp_x_real; break; } } } } #else void cpu_chebyshev_2x( int number_of_atoms, int max_neighbor, real energy_max, int* g_neighbor_number, int* g_neighbor_list, real* g_potential, real* g_hopping_real, real* g_hopping_imag, real* g_xx, real* g_state_0_real, real* g_state_0_imag, real* g_state_0x_real, real* g_state_0x_imag, real* g_state_1_real, real* g_state_1_imag, real* g_state_1x_real, real* g_state_1x_imag, real* g_state_2_real, real* g_state_2_imag, real* g_state_2x_real, real* g_state_2x_imag, real* g_state_real, real* g_state_imag, real g_bessel_m, int g_label) { for (int n = 0; n < number_of_atoms; ++n) { real temp_real = g_potential[n] * g_state_1_real[n]; // on-site real temp_imag = g_potential[n] * g_state_1_imag[n]; // on-site real temp_x_real = g_potential[n] * g_state_1x_real[n]; // on-site real temp_x_imag = g_potential[n] * g_state_1x_imag[n]; // on-site for (int m = 0; m < g_neighbor_number[n]; ++m) { int index_1 = n * max_neighbor + m; int index_2 = g_neighbor_list[index_1]; real a = g_hopping_real[index_1]; real b = g_hopping_imag[index_1]; real c = g_state_1_real[index_2]; real d = g_state_1_imag[index_2]; temp_real += a * c - b * d; // hopping temp_imag += a * d + b * c; // hopping real cx = g_state_1x_real[index_2]; real dx = g_state_1x_imag[index_2]; temp_x_real += a * cx - b * dx; // hopping temp_x_imag += a * dx + b * cx; // hopping real xx = g_xx[index_1]; temp_x_real -= (a * c - b * d) * xx; // hopping temp_x_imag -= (a * d + b * c) * xx; // hopping } temp_real /= energy_max; // scale temp_imag /= energy_max; // scale temp_real = 2.0 * temp_real - g_state_0_real[n]; temp_imag = 2.0 * temp_imag - g_state_0_imag[n]; g_state_2_real[n] = temp_real; g_state_2_imag[n] = temp_imag; temp_x_real /= energy_max; // scale temp_x_imag /= energy_max; // scale temp_x_real = 2.0 * temp_x_real - g_state_0x_real[n]; temp_x_imag = 2.0 * temp_x_imag - g_state_0x_imag[n]; g_state_2x_real[n] = temp_x_real; g_state_2x_imag[n] = temp_x_imag; real bessel_m = g_bessel_m; switch (g_label) { case 1: { g_state_real[n] += bessel_m * temp_x_real; g_state_imag[n] += bessel_m * temp_x_imag; break; } case 2: { g_state_real[n] -= bessel_m * temp_x_real; g_state_imag[n] -= bessel_m * 
temp_x_imag; break; } case 3: { g_state_real[n] += bessel_m * temp_x_imag; g_state_imag[n] -= bessel_m * temp_x_real; break; } case 4: { g_state_real[n] -= bessel_m * temp_x_imag; g_state_imag[n] += bessel_m * temp_x_real; break; } } } } #endif // Wrapper for the kernel above void Hamiltonian::chebyshev_2x( Vector& state_0, Vector& state_0x, Vector& state_1, Vector& state_1x, Vector& state_2, Vector& state_2x, Vector& state, real bessel_m, int label) { #ifndef CPU_ONLY hipLaunchKernelGGL(gpu_chebyshev_2x, dim3(grid_size), dim3(BLOCK_SIZE), 0, 0, n, energy_max, neighbor_number, neighbor_list, potential, hopping_real, hopping_imag, xx, state_0.real_part, state_0.imag_part, state_0x.real_part, state_0x.imag_part, state_1.real_part, state_1.imag_part, state_1x.real_part, state_1x.imag_part, state_2.real_part, state_2.imag_part, state_2x.real_part, state_2x.imag_part, state.real_part, state.imag_part, bessel_m, label); CHECK(hipGetLastError()); #else cpu_chebyshev_2x( n, max_neighbor, energy_max, neighbor_number, neighbor_list, potential, hopping_real, hopping_imag, xx, state_0.real_part, state_0.imag_part, state_0x.real_part, state_0x.imag_part, state_1.real_part, state_1.imag_part, state_1x.real_part, state_1x.imag_part, state_2.real_part, state_2.imag_part, state_2x.real_part, state_2x.imag_part, state.real_part, state.imag_part, bessel_m, label); #endif } // Kernel for doing the Chebyshev iteration phi_2 = 2 * H * phi_1 - phi_0. #ifndef CPU_ONLY __global__ void gpu_kernel_polynomial( const int number_of_atoms, const real energy_max, const int* __restrict__ g_neighbor_number, const int* __restrict__ g_neighbor_list, const real* __restrict__ g_potential, const real* __restrict__ g_hopping_real, const real* __restrict__ g_hopping_imag, const real* __restrict__ g_state_0_real, const real* __restrict__ g_state_0_imag, const real* __restrict__ g_state_1_real, const real* __restrict__ g_state_1_imag, real* __restrict__ g_state_2_real, real* __restrict__ g_state_2_imag) { int n = blockIdx.x * blockDim.x + threadIdx.x; if (n < number_of_atoms) { real temp_real = g_potential[n] * g_state_1_real[n]; // on-site real temp_imag = g_potential[n] * g_state_1_imag[n]; // on-site for (int m = 0; m < g_neighbor_number[n]; ++m) { int index_1 = m * number_of_atoms + n; int index_2 = g_neighbor_list[index_1]; real a = g_hopping_real[index_1]; real b = g_hopping_imag[index_1]; real c = g_state_1_real[index_2]; real d = g_state_1_imag[index_2]; temp_real += a * c - b * d; // hopping temp_imag += a * d + b * c; // hopping } temp_real /= energy_max; // scale temp_imag /= energy_max; // scale temp_real = 2.0 * temp_real - g_state_0_real[n]; temp_imag = 2.0 * temp_imag - g_state_0_imag[n]; g_state_2_real[n] = temp_real; g_state_2_imag[n] = temp_imag; } } #else void cpu_kernel_polynomial( int number_of_atoms, int max_neighbor, real energy_max, int* g_neighbor_number, int* g_neighbor_list, real* g_potential, real* g_hopping_real, real* g_hopping_imag, real* g_state_0_real, real* g_state_0_imag, real* g_state_1_real, real* g_state_1_imag, real* g_state_2_real, real* g_state_2_imag) { for (int n = 0; n < number_of_atoms; ++n) { real temp_real = g_potential[n] * g_state_1_real[n]; // on-site real temp_imag = g_potential[n] * g_state_1_imag[n]; // on-site for (int m = 0; m < g_neighbor_number[n]; ++m) { int index_1 = n * max_neighbor + m; int index_2 = g_neighbor_list[index_1]; real a = g_hopping_real[index_1]; real b = g_hopping_imag[index_1]; real c = g_state_1_real[index_2]; real d = g_state_1_imag[index_2]; temp_real += 
a * c - b * d; // hopping temp_imag += a * d + b * c; // hopping } temp_real /= energy_max; // scale temp_imag /= energy_max; // scale temp_real = 2.0 * temp_real - g_state_0_real[n]; temp_imag = 2.0 * temp_imag - g_state_0_imag[n]; g_state_2_real[n] = temp_real; g_state_2_imag[n] = temp_imag; } } #endif // Wrapper for the Chebyshev iteration void Hamiltonian::kernel_polynomial(Vector& state_0, Vector& state_1, Vector& state_2) { #ifndef CPU_ONLY hipLaunchKernelGGL(gpu_kernel_polynomial, dim3(grid_size), dim3(BLOCK_SIZE), 0, 0, n, energy_max, neighbor_number, neighbor_list, potential, hopping_real, hopping_imag, state_0.real_part, state_0.imag_part, state_1.real_part, state_1.imag_part, state_2.real_part, state_2.imag_part); CHECK(hipGetLastError()); #else cpu_kernel_polynomial( n, max_neighbor, energy_max, neighbor_number, neighbor_list, potential, hopping_real, hopping_imag, state_0.real_part, state_0.imag_part, state_1.real_part, state_1.imag_part, state_2.real_part, state_2.imag_part); #endif }
530ecfdeb5221a664502c47cd0ca11a88dac12aa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <assert.h> #include "Device.h" #include "Rippling.h" using std::cout; using std::endl; /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ extern __global__ void rippling(uchar4* ptrDevPixels,uint w, uint h,float t); /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /*-------------------------*\ |* Constructeur *| \*-------------------------*/ Rippling::Rippling(const Grid& grid, uint w, uint h, float dt) : Animable_I<uchar4>(grid, w, h, "Rippling_Cuda_RGBA_uchar4") { assert(w == h); // specific rippling // Inputs this->dt = dt; // Tools this->t = 0; // protected dans Animable } Rippling::~Rippling() { // rien } /*-------------------------*\ |* Methode *| \*-------------------------*/ /** * Override * Call periodicly by the API * * Note : domaineMath pas use car pas zoomable */ void Rippling::process(uchar4* ptrDevPixels, uint w, uint h, const DomaineMath& domaineMath) { Device::lastCudaError("rippling rgba uchar4 (before)"); // facultatif, for debug only, remove for release // TODO lancer le kernel avec <<<dg,db>>> // le kernel est importer ci-dessus (ligne 19) hipLaunchKernelGGL(( rippling), dim3(dg),dim3(db), 0, 0, ptrDevPixels,w,h,t); Device::lastCudaError("rippling rgba uchar4 (after)"); // facultatif, for debug only, remove for release } /** * Override * Call periodicly by the API */ void Rippling::animationStep() { t += dt; } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
530ecfdeb5221a664502c47cd0ca11a88dac12aa.cu
#include <iostream> #include <assert.h> #include "Device.h" #include "Rippling.h" using std::cout; using std::endl; /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ extern __global__ void rippling(uchar4* ptrDevPixels,uint w, uint h,float t); /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /*-------------------------*\ |* Constructeur *| \*-------------------------*/ Rippling::Rippling(const Grid& grid, uint w, uint h, float dt) : Animable_I<uchar4>(grid, w, h, "Rippling_Cuda_RGBA_uchar4") { assert(w == h); // specific rippling // Inputs this->dt = dt; // Tools this->t = 0; // protected dans Animable } Rippling::~Rippling() { // rien } /*-------------------------*\ |* Methode *| \*-------------------------*/ /** * Override * Call periodicly by the API * * Note : domaineMath pas use car pas zoomable */ void Rippling::process(uchar4* ptrDevPixels, uint w, uint h, const DomaineMath& domaineMath) { Device::lastCudaError("rippling rgba uchar4 (before)"); // facultatif, for debug only, remove for release // TODO lancer le kernel avec <<<dg,db>>> // le kernel est importer ci-dessus (ligne 19) rippling<<<dg,db>>>(ptrDevPixels,w,h,t); Device::lastCudaError("rippling rgba uchar4 (after)"); // facultatif, for debug only, remove for release } /** * Override * Call periodicly by the API */ void Rippling::animationStep() { t += dt; } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
fb56efcfb4fef3b1d643dee41de55fa002f80a14.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> __global__ void vectorAdd(const float *a, const float *b, float *c, int numElements) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numElements) { c[i] = a[i] + b[i]; } for (const clock_t threshold = clock() + 1e+4; clock() < threshold;); } int main(int argc, char *argv[]) { int numElements = 3 << 22; // Allocate vectors a, b and c in host memory. size_t numBytes = sizeof(float) * numElements; float *h_a; float *h_b; float *h_c; hipHostMalloc((void **)&h_a, numBytes); hipHostMalloc((void **)&h_b, numBytes); hipHostMalloc((void **)&h_c, numBytes); // Initialize vectors a and b. for (int i = 0; i < numElements; ++i) { h_a[i] = rand() / (float)RAND_MAX; h_b[i] = rand() / (float)RAND_MAX; } // Get the number of CUDA devices. int numDevices; hipGetDeviceCount(&numDevices); // Compute the average number of elements per device and the number of spare elements. int avgElementsPerDevice = numElements / numDevices; int sprElements = numElements - avgElementsPerDevice * numDevices; float **d_a = (float **)malloc(sizeof(float *) * numDevices); float **d_b = (float **)malloc(sizeof(float *) * numDevices); float **d_c = (float **)malloc(sizeof(float *) * numDevices); for (int i = 0, offset = 0; i < numDevices; ++i) { // Determine the number of elements to be processed by the current device. int numElementsCurrentDevice = avgElementsPerDevice + (i < sprElements); // Set device to be used for GPU executions. // highlight: hipSetDevice(i); // Allocate vectors a, b and c in device memory. size_t numBytesCurrentDevice = sizeof(int) * numElementsCurrentDevice; hipMalloc((void **)&d_a[i], numBytesCurrentDevice); hipMalloc((void **)&d_b[i], numBytesCurrentDevice); hipMalloc((void **)&d_c[i], numBytesCurrentDevice); // Copy vectors a and b from host memory to device memory asynchronously. hipMemcpyAsync(d_a[i], h_a + offset, numBytesCurrentDevice, hipMemcpyHostToDevice); hipMemcpyAsync(d_b[i], h_b + offset, numBytesCurrentDevice, hipMemcpyHostToDevice); // Determine the number of threads per block and the number of blocks per grid. unsigned int numThreadsPerBlock = 256; unsigned int numBlocksPerGrid = (numElementsCurrentDevice + numThreadsPerBlock - 1) / numThreadsPerBlock; // Invoke the kernel on device asynchronously. hipLaunchKernelGGL(( vectorAdd), dim3(numBlocksPerGrid), dim3(numThreadsPerBlock), 0, 0, d_a[i], d_b[i], d_c[i], numElementsCurrentDevice); // Copy vector c from device memory to host memory asynchronously. hipMemcpyAsync(h_c + offset, d_c[i], numBytesCurrentDevice, hipMemcpyDeviceToHost); // Increase offset to point to the next portion of data. offset += numElementsCurrentDevice; } // Wait for the devices to finish. for (int i = 0; i < numDevices; ++i) { hipSetDevice(i); hipDeviceSynchronize(); } // Validate the result. for (int i = 0; i < numElements; ++i) { float actual = h_c[i]; float expected = h_a[i] + h_b[i]; if (fabs(actual - expected) > 1e-7) { printf("h_c[%d] = %f, expected = %f\n", i, actual, expected); break; } } // Cleanup. for (int i = 0; i < numDevices; ++i) { hipSetDevice(i); hipFree(d_c[i]); hipFree(d_b[i]); hipFree(d_a[i]); } free(d_c); free(d_b); free(d_a); hipHostFree(h_c); hipHostFree(h_b); hipHostFree(h_a); for (int i = 0; i < numDevices; ++i) { hipSetDevice(i); hipDeviceReset(); } }
fb56efcfb4fef3b1d643dee41de55fa002f80a14.cu
#include <stdio.h> __global__ void vectorAdd(const float *a, const float *b, float *c, int numElements) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numElements) { c[i] = a[i] + b[i]; } for (const clock_t threshold = clock() + 1e+4; clock() < threshold;); } int main(int argc, char *argv[]) { int numElements = 3 << 22; // Allocate vectors a, b and c in host memory. size_t numBytes = sizeof(float) * numElements; float *h_a; float *h_b; float *h_c; cudaMallocHost((void **)&h_a, numBytes); cudaMallocHost((void **)&h_b, numBytes); cudaMallocHost((void **)&h_c, numBytes); // Initialize vectors a and b. for (int i = 0; i < numElements; ++i) { h_a[i] = rand() / (float)RAND_MAX; h_b[i] = rand() / (float)RAND_MAX; } // Get the number of CUDA devices. int numDevices; cudaGetDeviceCount(&numDevices); // Compute the average number of elements per device and the number of spare elements. int avgElementsPerDevice = numElements / numDevices; int sprElements = numElements - avgElementsPerDevice * numDevices; float **d_a = (float **)malloc(sizeof(float *) * numDevices); float **d_b = (float **)malloc(sizeof(float *) * numDevices); float **d_c = (float **)malloc(sizeof(float *) * numDevices); for (int i = 0, offset = 0; i < numDevices; ++i) { // Determine the number of elements to be processed by the current device. int numElementsCurrentDevice = avgElementsPerDevice + (i < sprElements); // Set device to be used for GPU executions. // highlight: cudaSetDevice(i); // Allocate vectors a, b and c in device memory. size_t numBytesCurrentDevice = sizeof(int) * numElementsCurrentDevice; cudaMalloc((void **)&d_a[i], numBytesCurrentDevice); cudaMalloc((void **)&d_b[i], numBytesCurrentDevice); cudaMalloc((void **)&d_c[i], numBytesCurrentDevice); // Copy vectors a and b from host memory to device memory asynchronously. cudaMemcpyAsync(d_a[i], h_a + offset, numBytesCurrentDevice, cudaMemcpyHostToDevice); cudaMemcpyAsync(d_b[i], h_b + offset, numBytesCurrentDevice, cudaMemcpyHostToDevice); // Determine the number of threads per block and the number of blocks per grid. unsigned int numThreadsPerBlock = 256; unsigned int numBlocksPerGrid = (numElementsCurrentDevice + numThreadsPerBlock - 1) / numThreadsPerBlock; // Invoke the kernel on device asynchronously. vectorAdd<<<numBlocksPerGrid, numThreadsPerBlock>>>(d_a[i], d_b[i], d_c[i], numElementsCurrentDevice); // Copy vector c from device memory to host memory asynchronously. cudaMemcpyAsync(h_c + offset, d_c[i], numBytesCurrentDevice, cudaMemcpyDeviceToHost); // Increase offset to point to the next portion of data. offset += numElementsCurrentDevice; } // Wait for the devices to finish. for (int i = 0; i < numDevices; ++i) { cudaSetDevice(i); cudaDeviceSynchronize(); } // Validate the result. for (int i = 0; i < numElements; ++i) { float actual = h_c[i]; float expected = h_a[i] + h_b[i]; if (fabs(actual - expected) > 1e-7) { printf("h_c[%d] = %f, expected = %f\n", i, actual, expected); break; } } // Cleanup. for (int i = 0; i < numDevices; ++i) { cudaSetDevice(i); cudaFree(d_c[i]); cudaFree(d_b[i]); cudaFree(d_a[i]); } free(d_c); free(d_b); free(d_a); cudaFreeHost(h_c); cudaFreeHost(h_b); cudaFreeHost(h_a); for (int i = 0; i < numDevices; ++i) { cudaSetDevice(i); cudaDeviceReset(); } }
d22d52f8972c7d2b400708f7d2a1cfef19af7b27.hip
// !!! This is a file automatically generated by hipify!!! #include <catpaw/CatTimer.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include <thrust\device_vector.h> #include <thrust/extrema.h> #include <thrust/reduce.h> #include <thrust/sort.h> #include "helper_cuda.h" #include "helper_string.h" #include "pbf_kernel_impl_hip.cuh" typedef unsigned int uint; void copyDeviceBuffer() { hipMemcpyToSymbol(dParam, &hParam, sizeof(SimParam)); } void fetchDeviceBuffer() { hipMemcpyFromSymbol(&hParam, dParam, sizeof(SimParam)); } uint iDivUp(uint a, uint b) { return (a % b != 0) ? (a / b + 1) : (a / b); } // compute grid and thread block size for a given number of elements void computeGridSize(uint n, uint blockSize, uint &numBlocks, uint &numThreads) { numThreads = min(blockSize, n); numBlocks = iDivUp(n, numThreads); } //================================================== // // // COUNTING SORT // // //=================================================== void calcHash( SimData data, int numParticles ) { getLastCudaError("Kernel execution failed:before calc hash"); uint numBlocks, numThreads; computeGridSize(numParticles, 256, numBlocks, numThreads); calcHashD << <numBlocks, numThreads >> > (data.particleHash, data.particleIndex, data.pos, numParticles); hipDeviceSynchronize(); getLastCudaError("Kernel execution failed: calc hash"); } void sortParticle( SimData data, int pnum ) { thrust::sort_by_key( thrust::device_ptr<int>(data.particleHash), thrust::device_ptr<int>(data.particleHash + pnum), thrust::device_ptr<int>(data.particleIndex) ); } void reorderDataAndFindCellStart( SimData data, int numParticles, int numCells ) { uint numThreads, numBlocks; computeGridSize(numParticles, 256, numBlocks, numThreads); hipMemset(data.gridCellStart, 0xffffffff, numCells * sizeof(uint)); //shared memory size uint smemSize = sizeof(uint)*(numThreads + 1); reorderDataAndFindCellStartD << < numBlocks, numThreads, smemSize >> >( data, numParticles); getLastCudaError("Kernel execution failed: reorder data"); } void sortBaryCenterCUDA(SimData data, int numTriangles, int numCells) { uint numThreads, numBlocks; computeGridSize(numTriangles, 256, numBlocks, numThreads); //step 1 calcBaryCenterHashD << <numBlocks, numThreads >> > (data, numTriangles); getLastCudaError("Kernel execution failed: calc bary center hash"); //step 2 thrust::sort_by_key( thrust::device_ptr<int>(data.baryCenterHash), thrust::device_ptr<int>(data.baryCenterHash + numTriangles), thrust::device_ptr<int>(data.baryCenterIndex) ); //step 3 hipMemset(data.gridCellStartBaryCenter, 0xffffffff, numCells * sizeof(uint)); //shared memory size uint smemSize = sizeof(uint)*(numThreads + 1); reorderBaryCenterAndFindCellStartD << < numBlocks, numThreads, smemSize >> >( data, numTriangles); getLastCudaError("Kernel execution failed: reorder bary center"); } float systime = 0; void predictPosition( SimData data, float dt, int numParticles ) { uint numBlocks, numThreads; computeGridSize(numParticles, 256, numBlocks, numThreads); hipLaunchKernelGGL(( predictPositionD), dim3(numBlocks), dim3(numThreads) , 0, 0, data, dt, numParticles); getLastCudaError("Kernel execution failed: predict pos"); } //========================================================= // // // #Solve Constraints# // // //========================================================= void calcLambda( SimData data, int numParticles ) { uint numBlocks, numThreads; computeGridSize(numParticles, 256, numBlocks, numThreads); hipLaunchKernelGGL(( calcLambdaD) , 
dim3(numBlocks), dim3(numThreads), 0, 0, data, numParticles); getLastCudaError("Kernel execution failed: calc lambda"); } void calcDeltaPos( SimData data, int numParticles ) { uint numBlocks, numThreads; computeGridSize(numParticles, 256, numBlocks, numThreads); calcDeltaPosD << < numBlocks, numThreads >> >( data, numParticles); getLastCudaError("Kernel execution failed: calc deltapos"); } void updatePos( SimData data, int numParticles ) { uint numBlocks, numThreads; computeGridSize(numParticles, 256, numBlocks, numThreads); hipLaunchKernelGGL(( updatePosD), dim3(numBlocks), dim3(numThreads), 0, 0, data, numParticles ); getLastCudaError("Kernel execution failed: update pos"); } void calcStablizeDeltaPos(SimData data, int numParticles) { uint numBlocks, numThreads; computeGridSize(numParticles, 256, numBlocks, numThreads); hipLaunchKernelGGL(( calcStablizeDeltaPosD) , dim3(numParticles), dim3(numThreads), 0, 0, data,numParticles); getLastCudaError("Kernel execution failed: calc stablize deltapos"); } void updateStablizePos(SimData data, int numParticles) { uint numBlocks, numThreads; computeGridSize(numParticles, 256, numBlocks, numThreads); hipLaunchKernelGGL(( updateStablizePosD) , dim3(numParticles), dim3(numThreads), 0, 0, data, numParticles); getLastCudaError("Kernel execution failed: calc stablize deltapos"); } void calcEdgeCons(SimData data, int numEdgeCons) { uint numBlocks, numThreads; computeGridSize(numEdgeCons, 256, numBlocks, numThreads); calcEdgeConsD << < numBlocks, numThreads >> >( data, numEdgeCons ); getLastCudaError("Kernel execution failed: calc edge constraint"); } void resetEdgeConsX(SimData data, int numEdgeCons) { uint numBlocks, numThreads; computeGridSize(numEdgeCons, 256, numBlocks, numThreads); resetEdgeConsXD << < numBlocks, numThreads >> >( data, numEdgeCons ); getLastCudaError("Kernel execution failed: reset edge constraint X"); } void calcEdgeConsX(SimData data, int numEdgeCons) { uint numBlocks, numThreads; computeGridSize(numEdgeCons, 256, numBlocks, numThreads); /*calcRubberEdgeConsXD << < numBlocks, numThreads >> >( data, numEdgeCons );*/ calcEdgeConsXD << < numBlocks, numThreads >> >( data, numEdgeCons ); getLastCudaError("Kernel execution failed: calc edge constraint X"); } void calcFacetVol( SimData data, int numTriangles ) { uint numBlocks, numThreads; computeGridSize(numTriangles, 256, numBlocks, numThreads); calcFacetVolD << < numBlocks, numThreads >> > ( data, numTriangles ); getLastCudaError("Kernel execution failed: calc vol constraint"); } void calcVolDeltaPos( SimData data, int numTriangles, bool jetGas ) { thrust::device_ptr<float> begin(data.facetVol); thrust::device_ptr<float> end = begin + numTriangles; float totalVol = thrust::reduce(begin, end); begin = thrust::device_pointer_cast(data.facetArea); end = begin + numTriangles; float surfaceArea = thrust::reduce(begin, end); float restvol = hParam.restvol; float dx = (restvol - totalVol) / surfaceArea; dx *= hParam.volumeStiff; printf("%f\n",dx); uint numBlocks, numThreads; computeGridSize(numTriangles, 256, numBlocks, numThreads); calcVolDeltaPosD << < numBlocks, numThreads >> > (data, numTriangles); getLastCudaError("Kernel execution failed: calc vol deltaPos"); } void calcVolPressureDPos(SimData data, int numTriangles, vector<SimulationObject>& objvec) { thrust::device_ptr<float> facetVol(data.facetVol); //thrust::device_ptr<float> begin(data.facetVol); //thrust::device_ptr<float> end = begin + numTriangles; bool updateVol = false; for (int i=0; i<objvec.size(); i++) { if(!objvec[i].bVolumeCorr) continue; updateVol = true;
thrust::device_ptr<float> begin = facetVol + objvec[i].starttriid; thrust::device_ptr<float> end = begin + objvec[i].trinum; float totalVol = thrust::reduce(begin, end); if (totalVol < 20) totalVol = 20; begin = thrust::device_pointer_cast(data.facetArea + objvec[i].starttriid ); end = begin + objvec[i].trinum; float surfaceArea = thrust::reduce(begin, end); float nRT = objvec[i].nRT; float pI = nRT / totalVol; float pE = hParam.pE; //printf("%f %f %f\n", pI, nRT, totalVol); float totalMass = 1; float dx = (pI - pE) * surfaceArea / totalMass * hParam.volumeStiff; objvec[i].dx = dx; } if (updateVol) { hipMemcpy(data.objs, objvec.data(), objvec.size()*sizeof(SimulationObject), hipMemcpyHostToDevice); uint numBlocks, numThreads; computeGridSize(numTriangles, 256, numBlocks, numThreads); calcVolDeltaPosD << < numBlocks, numThreads >> > (data, numTriangles); getLastCudaError("Kernel execution failed: calc vol pressure deltaPos"); } } float getVol(SimData data, int numTriangles) { thrust::device_ptr<float> begin(data.facetVol); thrust::device_ptr<float> end = begin + numTriangles; float totalVol = thrust::reduce(begin, end); return totalVol; } float getJetVol(SimData data, int numTriangles) { thrust::device_ptr<float> begin(data.facetVol); thrust::device_ptr<float> end = begin + numTriangles; float totalVol = thrust::reduce(begin, end); float surfaceArea = 6; float restvol = hParam.restvol; float dx = (restvol - totalVol) / surfaceArea; dx *= hParam.volumeStiff; return dx * surfaceArea * 50; } void getJetVolPressure(SimData data, int numTriangles, vector<SimulationObject>& objvec) { //thrust::device_ptr<float> begin(data.facetVol); //thrust::device_ptr<float> end = begin + numTriangles; thrust::device_ptr<float> facetVol(data.facetVol); for (int i=0; i<objvec.size(); i++) { if(!objvec[i].bJetGas) continue; thrust::device_ptr<float> begin = facetVol + objvec[i].starttriid; thrust::device_ptr<float> end = begin + objvec[i].trinum; float totalVol = thrust::reduce(begin, end); if (totalVol < 10) totalVol = 10; float surfaceArea = 6; //float nRT = hParam.restvol; float nRT = objvec[i].nRT; float pI = nRT / totalVol; float pE = hParam.pE; float dnRT = (pI - pE)*pI * surfaceArea*hParam.dt / hParam.resistance; if (dnRT < 0.00001) dnRT = 0; objvec[i].nRT -= dnRT; if (objvec[i].nRT < 10) objvec[i].nRT = 10; } } void labelCollisionCell(SimData data, int numParticles) { uint numBlocks, numThreads; computeGridSize(numParticles, 256, numBlocks, numThreads); labelCollisionCellD << <numBlocks, numThreads >> > (data, numParticles); getLastCudaError("Kernel execution failed: detect collision"); } void detectCollision( SimData data, int numParticles ) { uint numBlocks, numThreads; computeGridSize(numParticles, 256, numBlocks, numThreads); detectCollisionD << <numBlocks, numThreads >> > (data, numParticles); getLastCudaError("Kernel execution failed: detect collision"); } void detectCollisionWithMesh (SimData data, int numParticles) { uint numBlocks, numThreads; computeGridSize(numParticles, 256, numBlocks, numThreads); detectCollisionWithMeshD << <numBlocks, numThreads >> > (data, numParticles); getLastCudaError("Kernel execution failed: detect collision"); } void calcParticleNormal(SimData data, int numTriangles, int numParticles) { uint numBlocks, numThreads; computeGridSize(numTriangles, 256, numBlocks, numThreads); calcFacetNormalD << <numBlocks, numThreads >> > (data, numTriangles); getLastCudaError("Kernel execution failed: calc facet normal"); computeGridSize(numParticles, 256, numBlocks, numThreads); 
calcParticleNormalD << <numBlocks, numThreads >> > (data, numParticles); getLastCudaError("Kernel execution failed: calc particle normal"); } void updateVel( SimData data, int numParticles, float dt ) { uint numBlocks, numThreads; computeGridSize(numParticles, 256, numBlocks, numThreads); updateVelD << < numBlocks, numThreads >> >( data, numParticles, dt); getLastCudaError("Kernel execution failed: update vel"); } void applyXSPH( SimData data, int numParticles ) { uint numBlocks, numThreads; computeGridSize(numParticles, 256, numBlocks, numThreads); applyXSPHD << < numBlocks, numThreads >> >( data, numParticles); getLastCudaError("Kernel execution failed: apply xpsh"); } bool start=true; void waterAbsorption(SimData data, int numParticles) { uint numBlocks, numThreads; computeGridSize(numParticles, 256, numBlocks, numThreads); waterAbsorptionD << < numBlocks, numThreads >> >( data, numParticles); getLastCudaError("Kernel execution failed: water absorption"); } void waterDiffusion(SimData data, int numParticles) { uint numBlocks, numThreads; computeGridSize(numParticles, 256, numBlocks, numThreads); waterDiffusionPredictD << < numBlocks, numThreads >> >( data, numParticles); getLastCudaError("Kernel execution failed: water diffusion predict"); waterDiffusionD << < numBlocks, numThreads >> >( data, numParticles); getLastCudaError("Kernel execution failed: water diffusion"); updateDiffusionD << < numBlocks, numThreads >> >( data, numParticles); getLastCudaError("Kernel execution failed: update diffusion"); } void waterEmission(SimData data, int numParticles) { uint numBlocks, numThreads; computeGridSize(numParticles, 256, numBlocks, numThreads); waterEmissionD << < numBlocks, numThreads >> >( data, numParticles); getLastCudaError("Kernel execution failed: water emission"); } //======================================= // // // #Update Particle State# // // //======================================= /*void diffuse_fluidphase () { kernel_absorbfluid<<<hParam.nblock_p, hParam.nthread_p>>>(); hipDeviceSynchronize(); kernel_diffuse_predict<<<hParam.nblock_p, hParam.nthread_p>>>(); hipDeviceSynchronize(); kernel_diffuse<<<hParam.nblock_p, hParam.nthread_p>>>(); hipDeviceSynchronize(); }*/ /*void surfacetension () { kernel_yangtao_model <<<hParam.nblock_p, hParam.nthread_p>>>(); hipDeviceSynchronize(); kernel_computeNormal<<<hParam.nblock_p, hParam.nthread_p>>>(); hipDeviceSynchronize(); kernel_computeCurvature<<<hParam.nblock_p, hParam.nthread_p>>>(); hipDeviceSynchronize(); }*/ //======================================= // // // Convariance Matrix // // //======================================= //void computeCovmat () { // kernel_computeAvgpos <<<hParam.nblock_p, hParam.nthread_p>>>(); // hipDeviceSynchronize(); // kernel_computeCovmat <<<hParam.nblock_p, hParam.nthread_p>>>(); // hipDeviceSynchronize(); // //return; // float* debug = (float*)malloc(sizeof(float)*9*hParam.pnum); // hipMemcpy(debug, hParam.covmat, sizeof(cmat3)*hParam.pnum, hipMemcpyDeviceToHost); // FILE* fp = fopen("covmat.txt","w+"); // for (int i=0; i<hParam.pnum; i++) { // fprintf(fp, "%d\n%f %f %f\n%f %f %f\n %f %f %f\n",i,debug[i*9],debug[i*9+1],debug[i*9+2], // debug[i*9+3], debug[i*9+4], debug[i*9+5], // debug[i*9+6], debug[i*9+7], debug[i*9+8]); // } // fclose(fp); // hipMemcpy(debug, hParam.u, sizeof(cmat3)*hParam.pnum, hipMemcpyDeviceToHost); // fp = fopen("u.txt", "w+"); // for (int i=0; i<hParam.pnum; i++) { // fprintf(fp, "%d\n%f %f %f\n%f %f %f\n %f %f %f\n", i, debug[i*9], debug[i*9+1], debug[i*9+2], // debug[i*9+3], 
debug[i*9+4], debug[i*9+5], // debug[i*9+6], debug[i*9+7], debug[i*9+8]); // } // fclose(fp); // hipMemcpy(debug, hParam.s, sizeof(cmat3)*hParam.pnum, hipMemcpyDeviceToHost); // fp = fopen("s.txt", "w+"); // for (int i=0; i<hParam.pnum; i++) { // fprintf(fp, "%d\n%f %f %f\n%f %f %f\n %f %f %f\n", i, debug[i*9], debug[i*9+1], debug[i*9+2], // debug[i*9+3], debug[i*9+4], debug[i*9+5], // debug[i*9+6], debug[i*9+7], debug[i*9+8]); // } // fclose(fp); // hipMemcpy(debug, hParam.v, sizeof(cmat3)*hParam.pnum, hipMemcpyDeviceToHost); // fp = fopen("v.txt", "w+"); // for (int i=0; i<hParam.pnum; i++) { // fprintf(fp, "%d\n%f %f %f\n%f %f %f\n %f %f %f\n", i, debug[i*9], debug[i*9+1], debug[i*9+2], // debug[i*9+3], debug[i*9+4], debug[i*9+5], // debug[i*9+6], debug[i*9+7], debug[i*9+8]); // } // fclose(fp); // free(debug); //}
d22d52f8972c7d2b400708f7d2a1cfef19af7b27.cu
#include <catpaw/CatTimer.h> #include <cuda.h> #include <cuda_runtime_api.h> #include <cuda_runtime.h> #include <thrust\device_vector.h> #include <thrust/extrema.h> #include <thrust/reduce.h> #include <thrust/sort.h> #include "helper_cuda.h" #include "helper_string.h" #include "pbf_kernel_impl.cuh" typedef unsigned int uint; void copyDeviceBuffer() { cudaMemcpyToSymbol(dParam, &hParam, sizeof(SimParam)); } void fetchDeviceBuffer() { cudaMemcpyFromSymbol(&hParam, dParam, sizeof(SimParam)); } uint iDivUp(uint a, uint b) { return (a % b != 0) ? (a / b + 1) : (a / b); } // compute grid and thread block size for a given number of elements void computeGridSize(uint n, uint blockSize, uint &numBlocks, uint &numThreads) { numThreads = min(blockSize, n); numBlocks = iDivUp(n, numThreads); } //================================================== // // // COUNTING SORT // // //=================================================== void calcHash( SimData data, int numParticles ) { getLastCudaError("Kernel execution failed:before calc hash"); uint numBlocks, numThreads; computeGridSize(numParticles, 256, numBlocks, numThreads); calcHashD << <numBlocks, numThreads >> > (data.particleHash, data.particleIndex, data.pos, numParticles); cudaThreadSynchronize(); getLastCudaError("Kernel execution failed: calc hash"); } void sortParticle( SimData data, int pnum ) { thrust::sort_by_key( thrust::device_ptr<int>(data.particleHash), thrust::device_ptr<int>(data.particleHash + pnum), thrust::device_ptr<int>(data.particleIndex) ); } void reorderDataAndFindCellStart( SimData data, int numParticles, int numCells ) { uint numThreads, numBlocks; computeGridSize(numParticles, 256, numBlocks, numThreads); cudaMemset(data.gridCellStart, 0xffffffff, numCells * sizeof(uint)); //shared memory size uint smemSize = sizeof(uint)*(numThreads + 1); reorderDataAndFindCellStartD << < numBlocks, numThreads, smemSize >> >( data, numParticles); getLastCudaError("Kernel execution failed: reorder data"); } void sortBaryCenterCUDA(SimData data, int numTriangles, int numCells) { uint numThreads, numBlocks; computeGridSize(numTriangles, 256, numBlocks, numThreads); //step 1 calcBaryCenterHashD << <numBlocks, numThreads >> > (data, numTriangles); getLastCudaError("Kernel execution failed: calc bary center hash"); //step 2 thrust::sort_by_key( thrust::device_ptr<int>(data.baryCenterHash), thrust::device_ptr<int>(data.baryCenterHash + numTriangles), thrust::device_ptr<int>(data.baryCenterIndex) ); //step 3 cudaMemset(data.gridCellStartBaryCenter, 0xffffffff, numCells * sizeof(uint)); //shared memory size uint smemSize = sizeof(uint)*(numThreads + 1); reorderBaryCenterAndFindCellStartD << < numBlocks, numThreads, smemSize >> >( data, numTriangles); getLastCudaError("Kernel execution failed: reorder bary center"); } float systime = 0; void predictPosition( SimData data, float dt, int numParticles ) { uint numBlocks, numThreads; computeGridSize(numParticles, 256, numBlocks, numThreads); predictPositionD<<< numBlocks, numThreads >>>( data, dt, numParticles); getLastCudaError("Kernel execution failed: predict pos"); } //========================================================= // // // #Solve Constraints# // // //========================================================= void calcLambda( SimData data, int numParticles ) { uint numBlocks, numThreads; computeGridSize(numParticles, 256, numBlocks, numThreads); calcLambdaD <<< numBlocks, numThreads >> >( data, numParticles); getLastCudaError("Kernel execution failed: calc lambda"); } void calcDeltaPos( 
SimData data, int numParticles ) { uint numBlocks, numThreads; computeGridSize(numParticles, 256, numBlocks, numThreads); calcDeltaPosD << < numBlocks, numThreads >> >( data, numParticles); getLastCudaError("Kernel execution failed: calc deltapos"); } void updatePos( SimData data, int numParticles ) { uint numBlocks, numThreads; computeGridSize(numParticles, 256, numBlocks, numThreads); updatePosD <<< numBlocks, numThreads >>>( data, numParticles ); getLastCudaError("Kernel execution failed: update pos"); } void calcStablizeDeltaPos(SimData data, int numParticles) { uint numBlocks, numThreads; computeGridSize(numParticles, 256, numBlocks, numThreads); calcStablizeDeltaPosD <<<numParticles, numThreads>>>(data,numParticles); getLastCudaError("Kernel execution failed: calc stablize deltapos"); } void updateStablizePos(SimData data, int numParticles) { uint numBlocks, numThreads; computeGridSize(numParticles, 256, numBlocks, numThreads); updateStablizePosD <<<numParticles, numThreads>>>(data, numParticles); getLastCudaError("Kernel execution failed: calc stablize deltapos"); } void calcEdgeCons(SimData data, int numEdgeCons) { uint numBlocks, numThreads; computeGridSize(numEdgeCons, 256, numBlocks, numThreads); calcEdgeConsD << < numBlocks, numThreads >> >( data, numEdgeCons ); getLastCudaError("Kernel execution failed: calc edge constraint"); } void resetEdgeConsX(SimData data, int numEdgeCons) { uint numBlocks, numThreads; computeGridSize(numEdgeCons, 256, numBlocks, numThreads); resetEdgeConsXD << < numBlocks, numThreads >> >( data, numEdgeCons ); getLastCudaError("Kernel execution failed: reset edge constraint X"); } void calcEdgeConsX(SimData data, int numEdgeCons) { uint numBlocks, numThreads; computeGridSize(numEdgeCons, 256, numBlocks, numThreads); /*calcRubberEdgeConsXD << < numBlocks, numThreads >> >( data, numEdgeCons );*/ calcEdgeConsXD << < numBlocks, numThreads >> >( data, numEdgeCons ); getLastCudaError("Kernel execution failed: calc edge constraint X"); } void calcFacetVol( SimData data, int numTriangles ) { uint numBlocks, numThreads; computeGridSize(numTriangles, 256, numBlocks, numThreads); calcFacetVolD << < numBlocks, numThreads >> > ( data, numTriangles ); getLastCudaError("Kernel execution failed: calc vol constraint"); } void calcVolDeltaPos( SimData data, int numTriangles, bool jetGas ) { thrust::device_ptr<float> begin(data.facetVol); thrust::device_ptr<float> end = begin + numTriangles; float totalVol = thrust::reduce(begin, end); begin = thrust::device_pointer_cast(data.facetArea); end = begin + numTriangles; float surfaceArea = thrust::reduce(begin, end); float restvol = hParam.restvol; float dx = (restvol - totalVol) / surfaceArea; dx *= hParam.volumeStiff; printf("%f\n",dx); uint numBlocks, numThreads; computeGridSize(numTriangles, 256, numBlocks, numThreads); calcVolDeltaPosD << < numBlocks, numThreads >> > (data, numTriangles); getLastCudaError("Kernel execution failed: calc vol deltaPos"); } void calcVolPressureDPos(SimData data, int numTriangles, vector<SimulationObject>& objvec) { thrust::device_ptr<float> facetVol(data.facetVol); //thrust::device_ptr<float> begin(data.facetVol); //thrust::device_ptr<float> end = begin + numTriangles; bool updateVol = false; for (int i=0; i<objvec.size(); i++) { if(!objvec[i].bVolumeCorr) continue; updateVol = true; thrust::device_ptr<float> begin = facetVol + objvec[i].starttriid; thrust::device_ptr<float> end = begin + objvec[i].trinum; float totalVol = thrust::reduce(begin, end); if (totalVol < 20) totalVol = 20; begin = 
thrust::device_pointer_cast(data.facetArea + objvec[i].starttriid ); end = begin + objvec[i].trinum; float surfaceArea = thrust::reduce(begin, end); float nRT = objvec[i].nRT; float pI = nRT / totalVol; float pE = hParam.pE; //printf("%f %f %f\n", pI, nRT, totalVol); float totalMass = 1; float dx = (pI - pE) * surfaceArea / totalMass * hParam.volumeStiff; objvec[i].dx = dx; } if (updateVol) { cudaMemcpy(data.objs, objvec.data(), objvec.size()*sizeof(SimulationObject), cudaMemcpyHostToDevice); uint numBlocks, numThreads; computeGridSize(numTriangles, 256, numBlocks, numThreads); calcVolDeltaPosD << < numBlocks, numThreads >> > (data, numTriangles); getLastCudaError("Kernel execution failed: calc vol pressure deltaPos"); } } float getVol(SimData data, int numTriangles) { thrust::device_ptr<float> begin(data.facetVol); thrust::device_ptr<float> end = begin + numTriangles; float totalVol = thrust::reduce(begin, end); return totalVol; } float getJetVol(SimData data, int numTriangles) { thrust::device_ptr<float> begin(data.facetVol); thrust::device_ptr<float> end = begin + numTriangles; float totalVol = thrust::reduce(begin, end); float surfaceArea = 6; float restvol = hParam.restvol; float dx = (restvol - totalVol) / surfaceArea; dx *= hParam.volumeStiff; return dx * surfaceArea * 50; } void getJetVolPressure(SimData data, int numTriangles, vector<SimulationObject>& objvec) { //thrust::device_ptr<float> begin(data.facetVol); //thrust::device_ptr<float> end = begin + numTriangles; thrust::device_ptr<float> facetVol(data.facetVol); for (int i=0; i<objvec.size(); i++) { if(!objvec[i].bJetGas) continue; thrust::device_ptr<float> begin = facetVol + objvec[i].starttriid; thrust::device_ptr<float> end = begin + objvec[i].trinum; float totalVol = thrust::reduce(begin, end); if (totalVol < 10) totalVol = 10; float surfaceArea = 6; //float nRT = hParam.restvol; float nRT = objvec[i].nRT; float pI = nRT / totalVol; float pE = hParam.pE; float dnRT = (pI - pE)*pI * surfaceArea*hParam.dt / hParam.resistance; if (dnRT < 0.00001) dnRT = 0; objvec[i].nRT -= dnRT; if (objvec[i].nRT < 10) objvec[i].nRT = 10; } } void labelCollisionCell(SimData data, int numParticles) { uint numBlocks, numThreads; computeGridSize(numParticles, 256, numBlocks, numThreads); labelCollisionCellD << <numBlocks, numThreads >> > (data, numParticles); getLastCudaError("Kernel execution failed: detect collision"); } void detectCollision( SimData data, int numParticles ) { uint numBlocks, numThreads; computeGridSize(numParticles, 256, numBlocks, numThreads); detectCollisionD << <numBlocks, numThreads >> > (data, numParticles); getLastCudaError("Kernel execution failed: detect collision"); } void detectCollisionWithMesh (SimData data, int numParticles) { uint numBlocks, numThreads; computeGridSize(numParticles, 256, numBlocks, numThreads); detectCollisionWithMeshD << <numBlocks, numThreads >> > (data, numParticles); getLastCudaError("Kernel execution failed: detect collision"); } void calcParticleNormal(SimData data, int numTriangles, int numParticles) { uint numBlocks, numThreads; computeGridSize(numTriangles, 256, numBlocks, numThreads); calcFacetNormalD << <numBlocks, numThreads >> > (data, numTriangles); getLastCudaError("Kernel execution failed: calc facet normal"); computeGridSize(numParticles, 256, numBlocks, numThreads); calcParticleNormalD << <numBlocks, numThreads >> > (data, numParticles); getLastCudaError("Kernel execution failed: calc particle normal"); } void updateVel( SimData data, int numParticles, float dt ) { uint 
numBlocks, numThreads; computeGridSize(numParticles, 256, numBlocks, numThreads); updateVelD << < numBlocks, numThreads >> >( data, numParticles, dt); getLastCudaError("Kernel execution failed: update vel"); } void applyXSPH( SimData data, int numParticles ) { uint numBlocks, numThreads; computeGridSize(numParticles, 256, numBlocks, numThreads); applyXSPHD << < numBlocks, numThreads >> >( data, numParticles); getLastCudaError("Kernel execution failed: apply xpsh"); } bool start=true; void waterAbsorption(SimData data, int numParticles) { uint numBlocks, numThreads; computeGridSize(numParticles, 256, numBlocks, numThreads); waterAbsorptionD << < numBlocks, numThreads >> >( data, numParticles); getLastCudaError("Kernel execution failed: water absorption"); } void waterDiffusion(SimData data, int numParticles) { uint numBlocks, numThreads; computeGridSize(numParticles, 256, numBlocks, numThreads); waterDiffusionPredictD << < numBlocks, numThreads >> >( data, numParticles); getLastCudaError("Kernel execution failed: water diffusion predict"); waterDiffusionD << < numBlocks, numThreads >> >( data, numParticles); getLastCudaError("Kernel execution failed: water diffusion"); updateDiffusionD << < numBlocks, numThreads >> >( data, numParticles); getLastCudaError("Kernel execution failed: update diffusion"); } void waterEmission(SimData data, int numParticles) { uint numBlocks, numThreads; computeGridSize(numParticles, 256, numBlocks, numThreads); waterEmissionD << < numBlocks, numThreads >> >( data, numParticles); getLastCudaError("Kernel execution failed: water emission"); } //======================================= // // // #Update Particle State# // // //======================================= /*void diffuse_fluidphase () { kernel_absorbfluid<<<hParam.nblock_p, hParam.nthread_p>>>(); cudaThreadSynchronize(); kernel_diffuse_predict<<<hParam.nblock_p, hParam.nthread_p>>>(); cudaThreadSynchronize(); kernel_diffuse<<<hParam.nblock_p, hParam.nthread_p>>>(); cudaThreadSynchronize(); }*/ /*void surfacetension () { kernel_yangtao_model <<<hParam.nblock_p, hParam.nthread_p>>>(); cudaThreadSynchronize(); kernel_computeNormal<<<hParam.nblock_p, hParam.nthread_p>>>(); cudaThreadSynchronize(); kernel_computeCurvature<<<hParam.nblock_p, hParam.nthread_p>>>(); cudaThreadSynchronize(); }*/ //======================================= // // // Convariance Matrix // // //======================================= //void computeCovmat () { // kernel_computeAvgpos <<<hParam.nblock_p, hParam.nthread_p>>>(); // cudaThreadSynchronize(); // kernel_computeCovmat <<<hParam.nblock_p, hParam.nthread_p>>>(); // cudaThreadSynchronize(); // //return; // float* debug = (float*)malloc(sizeof(float)*9*hParam.pnum); // cudaMemcpy(debug, hParam.covmat, sizeof(cmat3)*hParam.pnum, cudaMemcpyDeviceToHost); // FILE* fp = fopen("covmat.txt","w+"); // for (int i=0; i<hParam.pnum; i++) { // fprintf(fp, "%d\n%f %f %f\n%f %f %f\n %f %f %f\n",i,debug[i*9],debug[i*9+1],debug[i*9+2], // debug[i*9+3], debug[i*9+4], debug[i*9+5], // debug[i*9+6], debug[i*9+7], debug[i*9+8]); // } // fclose(fp); // cudaMemcpy(debug, hParam.u, sizeof(cmat3)*hParam.pnum, cudaMemcpyDeviceToHost); // fp = fopen("u.txt", "w+"); // for (int i=0; i<hParam.pnum; i++) { // fprintf(fp, "%d\n%f %f %f\n%f %f %f\n %f %f %f\n", i, debug[i*9], debug[i*9+1], debug[i*9+2], // debug[i*9+3], debug[i*9+4], debug[i*9+5], // debug[i*9+6], debug[i*9+7], debug[i*9+8]); // } // fclose(fp); // cudaMemcpy(debug, hParam.s, sizeof(cmat3)*hParam.pnum, cudaMemcpyDeviceToHost); // fp = 
fopen("s.txt", "w+"); // for (int i=0; i<hParam.pnum; i++) { // fprintf(fp, "%d\n%f %f %f\n%f %f %f\n %f %f %f\n", i, debug[i*9], debug[i*9+1], debug[i*9+2], // debug[i*9+3], debug[i*9+4], debug[i*9+5], // debug[i*9+6], debug[i*9+7], debug[i*9+8]); // } // fclose(fp); // cudaMemcpy(debug, hParam.v, sizeof(cmat3)*hParam.pnum, cudaMemcpyDeviceToHost); // fp = fopen("v.txt", "w+"); // for (int i=0; i<hParam.pnum; i++) { // fprintf(fp, "%d\n%f %f %f\n%f %f %f\n %f %f %f\n", i, debug[i*9], debug[i*9+1], debug[i*9+2], // debug[i*9+3], debug[i*9+4], debug[i*9+5], // debug[i*9+6], debug[i*9+7], debug[i*9+8]); // } // fclose(fp); // free(debug); //}
9c4aad2689fe5bb7e166018b0d15396e78beca8d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "defs.h" #include "cuda_defs.h" __global__ void compute_dhalf(real *cons, real *dhalf, real *F_1, real *F_2,real *F_3, real *dx1, real *dx2, real *dx3, real dt, int nx1, int nx2, int nx3, int size_x1, int size_x12, int ntot, int offset, int nf) { int i,j,k; int indx; for(indx = blockIdx.x*blockDim.x + threadIdx.x; indx<ntot; indx+=blockDim.x*gridDim.x) { unpack_indices(indx,&i,&j,&k,size_x1,size_x12); if ((i>=-2)&&(i<nx1+2)&&(j>=-2)&&(j<nx2+2)&&(k>=-2)&&(k<nx3+2)) { dhalf[indx] = cons[indx] + .5*dt/dx1[i]*(F_1[indx - 1] - F_1[indx]); #ifdef DIMS2 dhalf[indx] += .5*dt/dx2[j]*(F_2[indx - size_x1] - F_2[indx]); #endif #ifdef DIMS3 dhalf[indx] += .5*dt/dx3[k]*(F_3[indx - size_x12] - F_3[indx]); #endif } } return; } __global__ void update_cons(real *cons, real *intenergy, real *F_1, real *F_2, real *F_3, real *dx1, real *dx2, real *dx3, real dt, int nx1, int nx2, int nx3, int size_x1, int size_x12, int ntot, int offset, int nf) { int i,j,k,n; int indx; int nan_check; real dtdx1; #ifdef DIMS2 real dtdx2; #endif #ifdef DIMS3 real dtdx3; #endif for(indx = blockIdx.x*blockDim.x + threadIdx.x; indx<ntot; indx+=blockDim.x*gridDim.x) { unpack_indices(indx,&i,&j,&k,size_x1,size_x12); if ((i>=0)&&(i<nx1)&&(j>=0)&&(j<nx2)&&(k>=0)&&(k<nx3)) { dtdx1 = dt/dx1[i]; #ifdef DIMS2 dtdx2 = dt/dx2[j]; #endif #ifdef DIMS3 dtdx3 = dt/dx3[k]; #endif for(n=0;n<nf;n++) { //printf("%d (%d,%d), %lg \n",n,i,j,F_1[indx + n*ntot]); cons[indx + n*ntot] += dtdx1*(F_1[indx - 1 + n*ntot]- F_1[indx + n*ntot]); #ifdef DIMS2 cons[indx + n*ntot] += dtdx2*(F_2[indx - size_x1 + n*ntot]- F_2[indx + n*ntot]); #endif #ifdef DIMS3 cons[indx + n*ntot] += dtdx3*(F_3[indx - size_x12 + n*ntot]- F_3[indx + n*ntot]); #endif } intenergy[indx] = cons[indx+4*ntot] - .5*( cons[indx + 1*ntot]*cons[indx + 1*ntot] + cons[indx + 2*ntot]*cons[indx + 2*ntot] + cons[indx + 3*ntot]*cons[indx + 3*ntot])/cons[indx]; } } return; } __global__ void transverse_update(real *UL_1, real *UL_2, real *UL_3, real *UR_1, real *UR_2, real *UR_3, real *F_1, real *F_2, real *F_3, real *dx1, real *dx2, real *dx3, real dt, int nx1, int nx2, int nx3, int size_x1, int size_x12, int ntot, int offset, int nf) { /* * G(i,j) G(i+1,j) * + + + + + + + + + * + + + * + UL(i,j) + UR(i,j) + * + + + * + (i,j) + (i+1,j) + * + + + * + + + + + + + + + * G(i,j-1) G(i+1,j-1) * * */ int i,j,k,n; int indx; real dtdx; for(indx = blockIdx.x*blockDim.x + threadIdx.x; indx<ntot;indx+=blockDim.x*gridDim.x) { unpack_indices(indx,&i,&j,&k,size_x1,size_x12); /* X1 - direction */ if ((i>=-2)&&(i<nx1+2)&&(j>=-2)&&(j<nx2+3)&&(k>=-2)&&(k<nx3+3)) { dtdx = .5*dt/dx2[j]; for(n=0;n<nf;n++) { UL_1[indx + n*ntot] += dtdx*(F_2[indx - size_x1 + n*ntot] -F_2[indx + n*ntot]); UR_1[indx + n*ntot] += dtdx*(F_2[indx - size_x1 + 1 + n*ntot] -F_2[indx + 1 + n*ntot]); } /* Add X3 flux */ #ifdef DIMS3 dtdx = .5*dt/dx3[k]; for(n=0;n<nf;n++) { UL_1[indx + n*ntot] += dtdx*(F_3[indx - size_x12 + n*ntot] -F_3[indx + n*ntot]); UR_1[indx + n*ntot] += dtdx*(F_3[indx - size_x12 + 1 + n*ntot] -F_3[indx + 1 + n*ntot]); } #endif } /* X2 - direction */ if ((i>=-2)&&(i<nx1+3)&&(j>=-2)&&(j<nx2+2)&&(k>=-2)&&(k<nx3+3)) { /* Add X1 flux */ dtdx = .5*dt/dx1[i]; for(n=0;n<nf;n++) { UL_2[indx + n*ntot] += dtdx*(F_1[indx - 1 + n*ntot] -F_1[indx + n*ntot]); UR_2[indx + n*ntot] += dtdx*(F_1[indx - 1 + size_x1 + n*ntot] -F_1[indx + size_x1 + n*ntot]); } /* Add X3 flux */ #ifdef DIMS3 /* Add X1 flux */ dtdx = .5*dt/dx3[k]; for(n=0;n<nf;n++) { 
UL_2[indx + n*ntot] += dtdx*(F_3[indx - size_x12 + n*ntot] -F_3[indx + n*ntot]); UR_2[indx + n*ntot] += dtdx*(F_3[indx - size_x12 + size_x1 + n*ntot] -F_3[indx + size_x1 + n*ntot]); } #endif } /* X3 - direction */ #ifdef DIMS3 if ((i>=-2)&&(i<nx1+3)&&(j>=-2)&&(j<nx2+3)&&(k>=-2)&&(k<nx3+2)) { /* Add X1 flux */ dtdx = .5*dt/dx1[i]; for(n=0;n<nf;n++) { UL_3[indx + n*ntot] += dtdx*(F_1[indx - 1 + n*ntot] -F_1[indx + n*ntot]); UR_3[indx + n*ntot] += dtdx*(F_1[indx - 1 + size_x12 + n*ntot] -F_1[indx + size_x12 + n*ntot]); } /* Add X2 flux */ dtdx = .5*dt/dx2[j]; for(n=0;n<nf;n++) { UL_3[indx + n*ntot] += dtdx*(F_2[indx - size_x1 + n*ntot] -F_2[indx + n*ntot]); UR_3[indx + n*ntot] += dtdx*(F_2[indx - size_x1 + size_x12 + n*ntot] -F_2[indx + size_x12 + n*ntot]); } } #endif } return; } __global__ void cons_to_prim(real *cons, real *intenergy, real *prim, real g1, int nx1, int nx2, int nx3, int size_x1, int size_x12, int ntot, int offset, int nf) { int i,j,k,n; int indx; for(indx = blockIdx.x*blockDim.x + threadIdx.x; indx<ntot; indx+=blockDim.x*gridDim.x) { unpack_indices(indx,&i,&j,&k,size_x1,size_x12); if ((i>=-NGHX1)&&(i<nx1+NGHX1)&&(j>=-NGHX2)&&(j<nx2+NGHX2)&&(k>=-NGHX3)&&(k<nx3+NGHX3)) { prim[indx + 0 *ntot] = cons[indx + 0*ntot]; prim[indx + 1 *ntot] = cons[indx + 1*ntot]/cons[indx]; prim[indx + 2 *ntot] = cons[indx + 2*ntot]/cons[indx]; prim[indx + 3 *ntot] = cons[indx + 3*ntot]/cons[indx]; prim[indx + 4 *ntot] = intenergy[indx] * g1; for(n=5;n<nf;n++) prim[indx + n*ntot] = cons[indx + n*ntot]/cons[indx]; } } return; } __global__ void prim_to_cons(real *cons, real *intenergy, real *prim, real g1, int nx1, int nx2, int nx3, int size_x1, int size_x12, int ntot, int offset, int nf) { int i,j,k,n; int indx; for(indx = blockIdx.x*blockDim.x + threadIdx.x; indx<ntot; indx+=blockDim.x*gridDim.x) { unpack_indices(indx,&i,&j,&k,size_x1,size_x12); if ((i>=-NGHX1)&&(i<nx1+NGHX1)&&(j>=-NGHX2)&&(j<nx2+NGHX2)&&(k>=-NGHX3)&&(k<nx3+NGHX3)) { cons[indx + 0 *ntot] = prim[indx + 0*ntot]; cons[indx + 1 *ntot] = prim[indx + 1*ntot]*prim[indx]; cons[indx + 2 *ntot] = prim[indx + 2*ntot]*prim[indx]; cons[indx + 3 *ntot] = prim[indx + 3*ntot]*prim[indx]; intenergy[indx ] = prim[indx + 4*ntot]/g1; cons[indx + 4 *ntot] = intenergy[indx] + .5*prim[indx]*(prim[indx + 1*ntot]*prim[indx + 1*ntot] + prim[indx + 2*ntot]*prim[indx + 2*ntot] + prim[indx + 3*ntot]*prim[indx + 3*ntot]); for(n=5;n<nf;n++) cons[indx + n*ntot] = prim[indx + n*ntot]*prim[indx]; } } return; }
9c4aad2689fe5bb7e166018b0d15396e78beca8d.cu
#include "defs.h" #include "cuda_defs.h" __global__ void compute_dhalf(real *cons, real *dhalf, real *F_1, real *F_2,real *F_3, real *dx1, real *dx2, real *dx3, real dt, int nx1, int nx2, int nx3, int size_x1, int size_x12, int ntot, int offset, int nf) { int i,j,k; int indx; for(indx = blockIdx.x*blockDim.x + threadIdx.x; indx<ntot; indx+=blockDim.x*gridDim.x) { unpack_indices(indx,&i,&j,&k,size_x1,size_x12); if ((i>=-2)&&(i<nx1+2)&&(j>=-2)&&(j<nx2+2)&&(k>=-2)&&(k<nx3+2)) { dhalf[indx] = cons[indx] + .5*dt/dx1[i]*(F_1[indx - 1] - F_1[indx]); #ifdef DIMS2 dhalf[indx] += .5*dt/dx2[j]*(F_2[indx - size_x1] - F_2[indx]); #endif #ifdef DIMS3 dhalf[indx] += .5*dt/dx3[k]*(F_3[indx - size_x12] - F_3[indx]); #endif } } return; } __global__ void update_cons(real *cons, real *intenergy, real *F_1, real *F_2, real *F_3, real *dx1, real *dx2, real *dx3, real dt, int nx1, int nx2, int nx3, int size_x1, int size_x12, int ntot, int offset, int nf) { int i,j,k,n; int indx; int nan_check; real dtdx1; #ifdef DIMS2 real dtdx2; #endif #ifdef DIMS3 real dtdx3; #endif for(indx = blockIdx.x*blockDim.x + threadIdx.x; indx<ntot; indx+=blockDim.x*gridDim.x) { unpack_indices(indx,&i,&j,&k,size_x1,size_x12); if ((i>=0)&&(i<nx1)&&(j>=0)&&(j<nx2)&&(k>=0)&&(k<nx3)) { dtdx1 = dt/dx1[i]; #ifdef DIMS2 dtdx2 = dt/dx2[j]; #endif #ifdef DIMS3 dtdx3 = dt/dx3[k]; #endif for(n=0;n<nf;n++) { //printf("%d (%d,%d), %lg \n",n,i,j,F_1[indx + n*ntot]); cons[indx + n*ntot] += dtdx1*(F_1[indx - 1 + n*ntot]- F_1[indx + n*ntot]); #ifdef DIMS2 cons[indx + n*ntot] += dtdx2*(F_2[indx - size_x1 + n*ntot]- F_2[indx + n*ntot]); #endif #ifdef DIMS3 cons[indx + n*ntot] += dtdx3*(F_3[indx - size_x12 + n*ntot]- F_3[indx + n*ntot]); #endif } intenergy[indx] = cons[indx+4*ntot] - .5*( cons[indx + 1*ntot]*cons[indx + 1*ntot] + cons[indx + 2*ntot]*cons[indx + 2*ntot] + cons[indx + 3*ntot]*cons[indx + 3*ntot])/cons[indx]; } } return; } __global__ void transverse_update(real *UL_1, real *UL_2, real *UL_3, real *UR_1, real *UR_2, real *UR_3, real *F_1, real *F_2, real *F_3, real *dx1, real *dx2, real *dx3, real dt, int nx1, int nx2, int nx3, int size_x1, int size_x12, int ntot, int offset, int nf) { /* * G(i,j) G(i+1,j) * + + + + + + + + + * + + + * + UL(i,j) + UR(i,j) + * + + + * + (i,j) + (i+1,j) + * + + + * + + + + + + + + + * G(i,j-1) G(i+1,j-1) * * */ int i,j,k,n; int indx; real dtdx; for(indx = blockIdx.x*blockDim.x + threadIdx.x; indx<ntot;indx+=blockDim.x*gridDim.x) { unpack_indices(indx,&i,&j,&k,size_x1,size_x12); /* X1 - direction */ if ((i>=-2)&&(i<nx1+2)&&(j>=-2)&&(j<nx2+3)&&(k>=-2)&&(k<nx3+3)) { dtdx = .5*dt/dx2[j]; for(n=0;n<nf;n++) { UL_1[indx + n*ntot] += dtdx*(F_2[indx - size_x1 + n*ntot] -F_2[indx + n*ntot]); UR_1[indx + n*ntot] += dtdx*(F_2[indx - size_x1 + 1 + n*ntot] -F_2[indx + 1 + n*ntot]); } /* Add X3 flux */ #ifdef DIMS3 dtdx = .5*dt/dx3[k]; for(n=0;n<nf;n++) { UL_1[indx + n*ntot] += dtdx*(F_3[indx - size_x12 + n*ntot] -F_3[indx + n*ntot]); UR_1[indx + n*ntot] += dtdx*(F_3[indx - size_x12 + 1 + n*ntot] -F_3[indx + 1 + n*ntot]); } #endif } /* X2 - direction */ if ((i>=-2)&&(i<nx1+3)&&(j>=-2)&&(j<nx2+2)&&(k>=-2)&&(k<nx3+3)) { /* Add X1 flux */ dtdx = .5*dt/dx1[i]; for(n=0;n<nf;n++) { UL_2[indx + n*ntot] += dtdx*(F_1[indx - 1 + n*ntot] -F_1[indx + n*ntot]); UR_2[indx + n*ntot] += dtdx*(F_1[indx - 1 + size_x1 + n*ntot] -F_1[indx + size_x1 + n*ntot]); } /* Add X3 flux */ #ifdef DIMS3 /* Add X1 flux */ dtdx = .5*dt/dx3[k]; for(n=0;n<nf;n++) { UL_2[indx + n*ntot] += dtdx*(F_3[indx - size_x12 + n*ntot] -F_3[indx + n*ntot]); UR_2[indx + 
n*ntot] += dtdx*(F_3[indx - size_x12 + size_x1 + n*ntot] -F_3[indx + size_x1 + n*ntot]); } #endif } /* X3 - direction */ #ifdef DIMS3 if ((i>=-2)&&(i<nx1+3)&&(j>=-2)&&(j<nx2+3)&&(k>=-2)&&(k<nx3+2)) { /* Add X1 flux */ dtdx = .5*dt/dx1[i]; for(n=0;n<nf;n++) { UL_3[indx + n*ntot] += dtdx*(F_1[indx - 1 + n*ntot] -F_1[indx + n*ntot]); UR_3[indx + n*ntot] += dtdx*(F_1[indx - 1 + size_x12 + n*ntot] -F_1[indx + size_x12 + n*ntot]); } /* Add X2 flux */ dtdx = .5*dt/dx2[j]; for(n=0;n<nf;n++) { UL_3[indx + n*ntot] += dtdx*(F_2[indx - size_x1 + n*ntot] -F_2[indx + n*ntot]); UR_3[indx + n*ntot] += dtdx*(F_2[indx - size_x1 + size_x12 + n*ntot] -F_2[indx + size_x12 + n*ntot]); } } #endif } return; } __global__ void cons_to_prim(real *cons, real *intenergy, real *prim, real g1, int nx1, int nx2, int nx3, int size_x1, int size_x12, int ntot, int offset, int nf) { int i,j,k,n; int indx; for(indx = blockIdx.x*blockDim.x + threadIdx.x; indx<ntot; indx+=blockDim.x*gridDim.x) { unpack_indices(indx,&i,&j,&k,size_x1,size_x12); if ((i>=-NGHX1)&&(i<nx1+NGHX1)&&(j>=-NGHX2)&&(j<nx2+NGHX2)&&(k>=-NGHX3)&&(k<nx3+NGHX3)) { prim[indx + 0 *ntot] = cons[indx + 0*ntot]; prim[indx + 1 *ntot] = cons[indx + 1*ntot]/cons[indx]; prim[indx + 2 *ntot] = cons[indx + 2*ntot]/cons[indx]; prim[indx + 3 *ntot] = cons[indx + 3*ntot]/cons[indx]; prim[indx + 4 *ntot] = intenergy[indx] * g1; for(n=5;n<nf;n++) prim[indx + n*ntot] = cons[indx + n*ntot]/cons[indx]; } } return; } __global__ void prim_to_cons(real *cons, real *intenergy, real *prim, real g1, int nx1, int nx2, int nx3, int size_x1, int size_x12, int ntot, int offset, int nf) { int i,j,k,n; int indx; for(indx = blockIdx.x*blockDim.x + threadIdx.x; indx<ntot; indx+=blockDim.x*gridDim.x) { unpack_indices(indx,&i,&j,&k,size_x1,size_x12); if ((i>=-NGHX1)&&(i<nx1+NGHX1)&&(j>=-NGHX2)&&(j<nx2+NGHX2)&&(k>=-NGHX3)&&(k<nx3+NGHX3)) { cons[indx + 0 *ntot] = prim[indx + 0*ntot]; cons[indx + 1 *ntot] = prim[indx + 1*ntot]*prim[indx]; cons[indx + 2 *ntot] = prim[indx + 2*ntot]*prim[indx]; cons[indx + 3 *ntot] = prim[indx + 3*ntot]*prim[indx]; intenergy[indx ] = prim[indx + 4*ntot]/g1; cons[indx + 4 *ntot] = intenergy[indx] + .5*prim[indx]*(prim[indx + 1*ntot]*prim[indx + 1*ntot] + prim[indx + 2*ntot]*prim[indx + 2*ntot] + prim[indx + 3*ntot]*prim[indx + 3*ntot]); for(n=5;n<nf;n++) cons[indx + n*ntot] = prim[indx + n*ntot]*prim[indx]; } } return; }
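Apart from the hipify banner and the hip_runtime include, the two 9c4aad files above are essentially identical: every kernel walks the flattened (i,j,k) grid with a grid-stride loop and applies a conservative flux-difference update. The sketch below is not from the dataset; it is a hypothetical 1D analogue of update_cons, assuming typedef double real and a flux array F in which F[i] is the flux through the right face of cell i, intended only to make the update cons[i] += dt/dx[i] * (F[i-1] - F[i]) explicit.

// Minimal 1D sketch (hypothetical; assumes typedef double real) of the grid-stride
// conservative update used by update_cons above.
typedef double real;

__global__ void update_cons_1d(real *cons, const real *F, const real *dx,
                               real dt, int nx) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < nx;
         i += blockDim.x * gridDim.x) {
        if (i >= 1)   // interior cells only; the real code handles boundaries via ghost zones
            cons[i] += dt / dx[i] * (F[i - 1] - F[i]);
    }
}
// example launch: update_cons_1d<<<(nx + 255) / 256, 256>>>(cons, F, dx, dt, nx);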
261b8cfd66efb7c552cc00055cc6b03626b31166.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kSquaredDiff.cu"
#include <chrono>
#include <iostream>

using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float *a = NULL;
            hipMalloc(&a, XSIZE*YSIZE);
            float *b = NULL;
            hipMalloc(&b, XSIZE*YSIZE);
            float *dest = NULL;
            hipMalloc(&dest, XSIZE*YSIZE);
            unsigned int numEls = 1;
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( kSquaredDiff), dim3(gridBlock), dim3(threadBlock), 0, 0, a, b, dest, numEls);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( kSquaredDiff), dim3(gridBlock), dim3(threadBlock), 0, 0, a, b, dest, numEls);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( kSquaredDiff), dim3(gridBlock), dim3(threadBlock), 0, 0, a, b, dest, numEls);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
261b8cfd66efb7c552cc00055cc6b03626b31166.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kSquaredDiff.cu"
#include <chrono>
#include <iostream>

using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float *a = NULL;
            cudaMalloc(&a, XSIZE*YSIZE);
            float *b = NULL;
            cudaMalloc(&b, XSIZE*YSIZE);
            float *dest = NULL;
            cudaMalloc(&dest, XSIZE*YSIZE);
            unsigned int numEls = 1;
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            kSquaredDiff<<<gridBlock,threadBlock>>>(a, b, dest, numEls);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                kSquaredDiff<<<gridBlock,threadBlock>>>(a, b, dest, numEls);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                kSquaredDiff<<<gridBlock,threadBlock>>>(a, b, dest, numEls);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
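The 261b8cfd record above is an auto-generated launch-configuration benchmark: for every matrix size and block shape it allocates buffers, warms the kernel up, and wraps 1000 launches in steady_clock. Because the <<< >>> launches are asynchronous and the timed loop is not followed by a synchronize, the reported microseconds can reflect enqueue overhead rather than kernel execution time. The fragment below is not part of the dataset; it is a drop-in variant of that timed loop using CUDA events, reusing gridBlock, threadBlock, a, b, dest and numEls exactly as they are set up in the record.

// Event-timed variant (not from the dataset) of the benchmark loop above.
// cudaEventElapsedTime reports milliseconds of GPU time between the two events.
cudaEvent_t startEv, stopEv;
cudaEventCreate(&startEv);
cudaEventCreate(&stopEv);

cudaEventRecord(startEv);
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    kSquaredDiff<<<gridBlock, threadBlock>>>(a, b, dest, numEls);
}
cudaEventRecord(stopEv);
cudaEventSynchronize(stopEv);

float msecs = 0.0f;
cudaEventElapsedTime(&msecs, startEv, stopEv);
cout << "GPU time for 1000 launches: " << msecs << " ms" << endl;

cudaEventDestroy(startEv);
cudaEventDestroy(stopEv);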
5c75bcf1d347a409a4ead09296d0b848eb4581e2.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <hip/hip_fp16.h> #include <hipcub/hipcub.hpp> #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/operators/fused/attention_layer_norm.h" #include "paddle/fluid/operators/fused/attn_gemm.h" #include "paddle/fluid/operators/fused/fmha_ref.h" #include "paddle/fluid/operators/fused/fused_dropout_helper.h" #include "paddle/fluid/platform/device/gpu/gpu_device_function.h" #include "paddle/fluid/platform/device/gpu/gpu_dnn.h" #include "paddle/phi/api/include/tensor.h" #include "paddle/phi/kernels/funcs/broadcast_function.h" #include "paddle/phi/kernels/funcs/elementwise_functor.h" #include "paddle/phi/kernels/funcs/math_function.h" #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) #include "paddle/fluid/distributed/collective/ProcessGroup.h" #include "paddle/fluid/platform/collective_helper.h" #include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif namespace paddle { namespace operators { using Tensor = framework::Tensor; template <typename T> static void AllReduce(framework::Tensor &tensor, // NOLINT const int ring_id, const phi::GPUContext &ctx) { if (ring_id == -1) return; #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) auto map = paddle::distributed::ProcessGroupMapFromGid::getInstance(); if (map->has(ring_id)) { paddle::distributed::ProcessGroup *pg = map->get(ring_id); std::vector<phi::DenseTensor> in_tensor; std::vector<phi::DenseTensor> out_tensor; in_tensor.push_back(tensor); out_tensor.push_back(tensor); paddle::distributed::AllreduceOptions opts; opts.reduce_op = distributed::ReduceOp::SUM; auto task = pg->AllReduce(in_tensor, out_tensor, opts); task->Wait(); } else { auto dtype = platform::ToNCCLDataType( framework::TransToProtoVarType(tensor.dtype())); int64_t numel = tensor.numel(); const void *sendbuff = tensor.data<T>(); auto place = ctx.GetPlace(); void *recvbuff = tensor.mutable_data<T>(place); auto comm = platform::NCCLCommContext::Instance().Get(ring_id, place); auto stream = ctx.stream(); PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( sendbuff, recvbuff, numel, dtype, ncclSum, comm->comm(), stream)); } #else PADDLE_THROW(platform::errors::Unimplemented( "PaddlePaddle should compile with NCCL or RCCL when used tensor model " "parallel op.")); #endif } template <typename T> class FusedAttentionOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { using U = LayerNormParamType<T>; auto *input_x = ctx.Input<Tensor>("X"); const auto pre_layer_norm = ctx.Attr<bool>("pre_layer_norm"); const float epsilon = ctx.Attr<float>("epsilon"); auto *ln_scale = ctx.Input<Tensor>("LnScale"); auto *ln_bias = ctx.Input<Tensor>("LnBias"); auto *ln_mean = ctx.Output<Tensor>("LnMean"); auto *ln_var = ctx.Output<Tensor>("LnVariance"); auto *ln_out = ctx.Output<Tensor>("LnOut"); // x: 
qkv's input [batch_size, seq_len, dim_embed] // y: qkv's weight: [3, num_head, dim_head, dim_embed] auto *qkv_weight = ctx.Input<Tensor>("QKVW"); auto *qkv_bias = ctx.Input<Tensor>("QKVBias"); auto *qkv_out = ctx.Output<Tensor>("QKVOut"); auto *qkv_bias_out = ctx.Output<Tensor>("QKVBiasOut"); auto *src_mask = ctx.Input<Tensor>("SrcMask"); auto *transpose_out_2 = ctx.Output<Tensor>("TransposeOut2"); auto *cache_kv = ctx.Input<Tensor>("CacheKV"); auto *cache_kv_out = ctx.Output<Tensor>("CacheKVOut"); auto *qk_out = ctx.Output<Tensor>("QKOut"); auto *qktv_out = ctx.Output<Tensor>("QKTVOut"); auto *softmax_out = ctx.Output<Tensor>("SoftmaxOut"); auto *attn_dropout_mask_out = ctx.Output<Tensor>("AttnDropoutMaskOut"); auto *attn_dropout_out = ctx.Output<Tensor>("AttnDropoutOut"); auto *src_mask_out = ctx.Output<Tensor>("SrcMaskOut"); auto *fmha_out = ctx.Output<Tensor>("FMHAOut"); auto *out_linear_weight = ctx.Input<Tensor>("OutLinearW"); auto *out_linear_bias = ctx.Input<Tensor>("OutLinearBias"); auto *out_linear_out = ctx.Output<Tensor>("OutLinearOut"); auto *ln_scale_2 = ctx.Input<Tensor>("Ln2Scale"); auto *ln_bias_2 = ctx.Input<Tensor>("Ln2Bias"); auto *dropout_mask_out = ctx.Output<Tensor>("DropoutMaskOut"); auto *bias_dropout_residual_out = ctx.Output<Tensor>("BiasDropoutResidualOut"); auto *ln_mean_2 = ctx.Output<Tensor>("Ln2Mean"); auto *ln_var_2 = ctx.Output<Tensor>("Ln2Variance"); const float ln_epsilon = ctx.Attr<float>("ln_epsilon"); float attn_dropout_rate = ctx.Attr<float>("attn_dropout_rate"); bool is_test_1 = ctx.Attr<bool>("is_test"); auto &dropout_implementation_1 = ctx.Attr<std::string>("attn_dropout_implementation"); bool is_upscale_in_train_1 = (dropout_implementation_1 == "upscale_in_train"); auto *seed_1 = ctx.HasInput("Seed1") ? ctx.Input<Tensor>("Seed1") : nullptr; bool is_fix_seed_1 = ctx.Attr<bool>("attn_dropout_fix_seed"); int seed_val_1 = ctx.Attr<int>("attn_dropout_seed"); int ring_id = ctx.Attr<int>("ring_id"); // final output. auto *out = ctx.Output<Tensor>("Y"); // get data ptr for qkv part. const auto input_x_dims = input_x->dims(); const auto qkv_w_dims = qkv_weight->dims(); auto *x_data = input_x->data<T>(); auto *qkv_weight_data = qkv_weight->data<T>(); auto *qkv_bias_data = (qkv_bias == nullptr) ? nullptr : qkv_bias->data<T>(); auto *qkv_out_data = qkv_out->mutable_data<T>(ctx.GetPlace()); auto *qkv_bias_out_data = (qkv_bias == nullptr) ? nullptr : qkv_bias_out->mutable_data<T>(ctx.GetPlace()); // get data ptr for FMHA. auto *transpose_out_2_data = transpose_out_2->mutable_data<T>(ctx.GetPlace()); auto *cache_kv_out_data = (cache_kv_out == nullptr) ? nullptr : cache_kv_out->mutable_data<T>(ctx.GetPlace()); auto *qk_out_data = qk_out->mutable_data<T>(ctx.GetPlace()); auto *qktv_out_data = qktv_out->mutable_data<T>(ctx.GetPlace()); auto *src_mask_out_data = (src_mask == nullptr) ? nullptr : src_mask_out->mutable_data<T>(ctx.GetPlace()); auto *softmax_out_data = softmax_out->mutable_data<T>(ctx.GetPlace()); auto *attn_dropout_mask_out_data = attn_dropout_mask_out->mutable_data<uint8_t>(ctx.GetPlace()); auto *attn_dropout_out_data = attn_dropout_out->mutable_data<T>(ctx.GetPlace()); auto *fmha_out_data = fmha_out->mutable_data<T>(ctx.GetPlace()); // get data ptr for out_linear. auto *out_linear_weight_data = out_linear_weight->data<T>(); auto *out_linear_bias_data = (out_linear_bias == nullptr) ? 
nullptr : out_linear_bias->data<T>(); auto *out_linear_out_data = out_linear_out->mutable_data<T>(ctx.GetPlace()); // get data ptr for bias+dropout+residual+layernorm auto *dropout_mask_out_data = dropout_mask_out->mutable_data<uint8_t>(ctx.GetPlace()); auto *final_out_data = out->mutable_data<T>(ctx.GetPlace()); int batch_size = input_x_dims[0]; int max_seq_len = input_x_dims[1]; int dim_embed = input_x_dims[2]; int num_head = qkv_w_dims[1]; int dim_head = qkv_w_dims[2]; int bsz_seq = batch_size * max_seq_len; int hidden_size = num_head * dim_head; int output_size = 3 * hidden_size; int input_size = dim_embed; auto layer_norm_compute = AttnLayerNorm<T>( ctx.cuda_device_context(), epsilon, bsz_seq, dim_embed); bool compute_bias = true; if (qkv_bias == nullptr) { compute_bias = false; } // (transA, transB, compute_bias) = (false, true, true) auto qkv_compute = AttnMatMul<T>(ctx.cuda_device_context(), false, true, bsz_seq, output_size, input_size, compute_bias); AttnDropoutParam attn_dropout_param(is_test_1, dropout_implementation_1, attn_dropout_rate, is_upscale_in_train_1, is_fix_seed_1, seed_val_1, seed_1); auto fmha_ref_compute = FMHARef<T>(ctx.cuda_device_context(), batch_size, max_seq_len, num_head, dim_head, attn_dropout_param); output_size = hidden_size; // (transA, transB, compute_bias) = (false, false, false) // NOTE(Yuang Liu): For general input size == output size, change the // position won't have effects. For mp, the output size is mp_head * dkey // which is actually the input size. While the input size is hidden size, // which is actually the output size. So for out linear, switch the // input size and output size. auto out_linear_compute = AttnMatMul<T>(ctx.cuda_device_context(), false, false, bsz_seq, input_size, output_size, false); DropoutParam dropout_param2(ctx, 0); FusedDropoutLayerNormHelper<T, uint8_t> fused_dropout_layernorm_helper( ctx.cuda_device_context(), bsz_seq, dim_embed, dropout_param2, ln_epsilon); if (pre_layer_norm) { auto *ln_scale_data = (ln_scale == nullptr ? nullptr : ln_scale->data<U>()); auto *ln_bias_data = (ln_bias == nullptr ? nullptr : ln_bias->data<U>()); auto *ln_mean_data = ln_mean->mutable_data<U>(ctx.GetPlace()); auto *ln_var_data = ln_var->mutable_data<U>(ctx.GetPlace()); auto *ln_out_data = ln_out->mutable_data<T>(ctx.GetPlace()); layer_norm_compute.ComputeForward(x_data, ln_scale_data, ln_bias_data, ln_out_data, ln_mean_data, ln_var_data); qkv_compute.ComputeForward( qkv_weight, ln_out, qkv_bias, qkv_out, qkv_bias_out); } else { qkv_compute.ComputeForward( qkv_weight, input_x, qkv_bias, qkv_out, qkv_bias_out); } if (qkv_bias == nullptr) { fmha_ref_compute.ComputeForward(*qkv_out, cache_kv, src_mask, transpose_out_2, cache_kv_out, qk_out, src_mask_out, softmax_out, attn_dropout_mask_out, attn_dropout_out, qktv_out, fmha_out); } else { fmha_ref_compute.ComputeForward(*qkv_bias_out, cache_kv, src_mask, transpose_out_2, cache_kv_out, qk_out, src_mask_out, softmax_out, attn_dropout_mask_out, attn_dropout_out, qktv_out, fmha_out); } // fmha_out: [batch_size, seq_len, num_head, head_dim] // weight: [embed_dim, embed_dim] // out_linear_out: [batch_size, seq_len, embed_dim] out_linear_compute.ComputeForward( out_linear_weight, fmha_out, nullptr, out_linear_out, nullptr); // tensor model parallel AllReduce<T>(*out_linear_out, ring_id, ctx.cuda_device_context()); bool add_residual = ctx.Attr<bool>("add_residual"); const T *residual_ptr = add_residual ? 
x_data : nullptr; if (pre_layer_norm) { // output = (residual + dropout(input + bias)) fused_dropout_layernorm_helper.ResidualDropoutBias( ctx.cuda_device_context(), out_linear_out_data, residual_ptr, out_linear_bias_data, final_out_data, dropout_mask_out_data); } else { // TODO(Xreki): support post layer_norm case when add_residual is false. PADDLE_ENFORCE_EQ(add_residual, true, platform::errors::InvalidArgument( "Attribute add_residual is expected to be true " "when pre_layer_norm is false.")); const U *ln_scale_2_ptr = ln_scale_2 ? ln_scale_2->data<U>() : nullptr; const U *ln_bias_2_ptr = ln_bias_2 ? ln_bias_2->data<U>() : nullptr; T *bias_dropout_residual_out_ptr = bias_dropout_residual_out->mutable_data<T>(ctx.GetPlace()); U *ln_mean_2_ptr = ln_mean_2->mutable_data<U>(ctx.GetPlace()); U *ln_var_2_ptr = ln_var_2->mutable_data<U>(ctx.GetPlace()); // output = layernorm(residual + dropout(input + bias)) fused_dropout_layernorm_helper.LayernormResidualDropoutBias( ctx.cuda_device_context(), out_linear_out_data, residual_ptr, out_linear_bias_data, ln_scale_2_ptr, ln_bias_2_ptr, bias_dropout_residual_out_ptr, dropout_mask_out_data, final_out_data, ln_mean_2_ptr, ln_var_2_ptr); } } }; template <typename T> class FusedAttentionGradKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { using U = LayerNormParamType<T>; const auto pre_layer_norm = ctx.Attr<bool>("pre_layer_norm"); const float epsilon = ctx.Attr<float>("epsilon"); const float ln2epsilon = ctx.Attr<float>("ln_epsilon"); float attn_dropout_prob = ctx.Attr<float>("attn_dropout_rate"); bool is_test_1 = ctx.Attr<bool>("is_test"); auto &dropout_implementation_1 = ctx.Attr<std::string>("attn_dropout_implementation"); bool is_upscale_in_train_1 = (dropout_implementation_1 == "upscale_in_train"); auto *seed_1 = ctx.HasInput("Seed1") ? ctx.Input<Tensor>("Seed1") : nullptr; bool is_fix_seed_1 = ctx.Attr<bool>("attn_dropout_fix_seed"); int seed_val_1 = ctx.Attr<int>("attn_dropout_seed"); int ring_id = ctx.Attr<int>("ring_id"); // get inputs. auto *d_y = ctx.Input<Tensor>(framework::GradVarName("Y")); auto *d_y_data = d_y->data<T>(); // fw input auto *input_x = ctx.Input<Tensor>("X"); auto *ln_scale = ctx.Input<Tensor>("LnScale"); auto *ln_2_scale = ctx.Input<Tensor>("Ln2Scale"); auto *x_data = input_x->data<T>(); auto *ln_scale_data = (ln_scale == nullptr ? nullptr : ln_scale->data<U>()); auto *ln_2_scale_data = (ln_2_scale == nullptr ? nullptr : ln_2_scale->data<U>()); // fw parameters. auto *src_mask = ctx.Input<Tensor>("SrcMask"); auto *qkv_weight = ctx.Input<Tensor>("QKVW"); auto *qkv_bias = ctx.Input<Tensor>("QKVBias"); auto *out_linear_weight = ctx.Input<Tensor>("OutLinearW"); auto *out_linear_bias = ctx.Input<Tensor>("OutLinearBias"); auto *src_mask_data = (src_mask == nullptr ? nullptr : src_mask->data<T>()); auto *qkv_weight_data = qkv_weight->data<T>(); auto *qkv_bias_data = (qkv_bias == nullptr) ? nullptr : qkv_bias->data<T>(); auto *out_linear_weight_data = out_linear_weight->data<T>(); auto *out_linear_bias_data = (out_linear_bias == nullptr) ? 
nullptr : out_linear_bias->data<T>(); // fw output auto *fmha_out = ctx.Input<Tensor>("FMHAOut"); auto *transpose_out_2 = ctx.Input<Tensor>("TransposeOut2"); auto *qk_out = ctx.Input<Tensor>("QKOut"); auto *qktv_out = ctx.Input<Tensor>("QKTVOut"); auto *softmax_out = ctx.Input<Tensor>("SoftmaxOut"); auto *attn_dropout_mask_out = ctx.Input<Tensor>("AttnDropoutMaskOut"); auto *attn_dropout_out = ctx.Input<Tensor>("AttnDropoutOut"); auto *src_mask_out = ctx.Input<Tensor>("SrcMaskOut"); auto *out_linear_out = ctx.Input<Tensor>("OutLinearOut"); auto *ln_2_mean = ctx.Input<Tensor>("Ln2Mean"); auto *ln_2_var = ctx.Input<Tensor>("Ln2Variance"); auto *dropout_mask_out = ctx.Input<Tensor>("DropoutMaskOut"); auto *bias_dropout_residual_out = ctx.Input<Tensor>("BiasDropoutResidualOut"); auto *fmha_out_data = fmha_out->data<T>(); auto *transpose_out_2_data = transpose_out_2->data<T>(); auto *qk_out_data = qk_out->data<T>(); auto *qktv_out_data = qktv_out->data<T>(); auto *softmax_out_data = softmax_out->data<T>(); auto *src_mask_out_data = (src_mask == nullptr) ? nullptr : src_mask_out->data<T>(); auto *out_linear_out_data = out_linear_out->data<T>(); auto *dropout_mask_out_data = dropout_mask_out->data<uint8_t>(); // output's grad auto *d_x = ctx.Output<Tensor>(framework::GradVarName("X")); auto *d_qkv_out = ctx.Output<Tensor>(framework::GradVarName("QKVOut")); auto *d_qkv_bias_out = ctx.Output<Tensor>(framework::GradVarName("QKVBiasOut")); auto *d_qktv_out = ctx.Output<Tensor>(framework::GradVarName("QKTVOut")); auto *d_transpose_out_2 = ctx.Output<Tensor>(framework::GradVarName("TransposeOut2")); auto *d_qk_out = ctx.Output<Tensor>(framework::GradVarName("QKOut")); auto *d_softmax_out = ctx.Output<Tensor>(framework::GradVarName("SoftmaxOut")); auto *d_attn_dropout_out = ctx.Output<Tensor>(framework::GradVarName("AttnDropoutOut")); auto *d_src_mask_out = ctx.Output<Tensor>(framework::GradVarName("SrcMaskOut")); auto *d_fmha_out = ctx.Output<Tensor>(framework::GradVarName("FMHAOut")); auto *d_out_linear_out = ctx.Output<Tensor>(framework::GradVarName("OutLinearOut")); auto *d_bias_dropout_residual_out = ctx.Output<Tensor>(framework::GradVarName("BiasDropoutResidualOut")); auto *d_x_data = d_x->mutable_data<T>(ctx.GetPlace()); // when qkv_bias is not nullptr, d_qkv_out is equals to d_qkv_bias_out, the // space can be reused. auto *d_qkv_out_data = (d_qkv_bias_out != nullptr) ? nullptr : d_qkv_out->mutable_data<T>(ctx.GetPlace()); auto *d_qkv_bias_out_data = (d_qkv_bias_out == nullptr) ? nullptr : d_qkv_bias_out->mutable_data<T>(ctx.GetPlace()); auto *d_qktv_out_data = d_qktv_out->mutable_data<T>(ctx.GetPlace()); auto *d_transpose_out_2_data = d_transpose_out_2->mutable_data<T>(ctx.GetPlace()); auto *d_qk_out_data = d_qk_out->mutable_data<T>(ctx.GetPlace()); auto *d_softmax_out_data = d_softmax_out->mutable_data<T>(ctx.GetPlace()); auto *d_attn_dropout_out_data = d_attn_dropout_out->mutable_data<T>(ctx.GetPlace()); auto *d_src_mask_out_data = (src_mask == nullptr) ? 
nullptr : d_src_mask_out->mutable_data<T>(ctx.GetPlace()); auto *d_fmha_out_data = d_fmha_out->mutable_data<T>(ctx.GetPlace()); auto *d_out_linear_out_data = d_out_linear_out->mutable_data<T>(ctx.GetPlace()); // parameter grad auto *d_qkv_weight = ctx.Output<Tensor>(framework::GradVarName("QKVW")); auto *d_qkv_bias = ctx.Output<Tensor>(framework::GradVarName("QKVBias")); auto *d_out_linear_weight = ctx.Output<Tensor>(framework::GradVarName("OutLinearW")); auto *d_out_linear_bias = ctx.Output<Tensor>(framework::GradVarName("OutLinearBias")); auto *d_ln_2_scale = ctx.Output<Tensor>(framework::GradVarName("Ln2Scale")); auto *d_ln_2_bias = ctx.Output<Tensor>(framework::GradVarName("Ln2Bias")); auto *d_qkv_weight_data = d_qkv_weight->mutable_data<T>(ctx.GetPlace()); auto *d_qkv_bias_data = (d_qkv_bias == nullptr) ? nullptr : d_qkv_bias->mutable_data<T>(ctx.GetPlace()); auto *d_out_linear_weight_data = d_out_linear_weight->mutable_data<T>(ctx.GetPlace()); auto *d_out_linear_bias_data = (d_out_linear_bias == nullptr) ? nullptr : d_out_linear_bias->mutable_data<T>(ctx.GetPlace()); const auto input_x_dims = input_x->dims(); const auto qkv_w_dims = qkv_weight->dims(); int batch_size = input_x_dims[0]; int max_seq_len = input_x_dims[1]; int dim_embed = input_x_dims[2]; int num_head = qkv_w_dims[1]; int dim_head = qkv_w_dims[2]; int bsz_seq = batch_size * max_seq_len; int hidden_size = num_head * dim_head; int output_size = 3 * hidden_size; int input_size = dim_embed; bool add_residual = ctx.Attr<bool>("add_residual"); Tensor d_residual; T *d_residual_data = nullptr; if (add_residual) { d_residual.Resize(input_x_dims); d_residual_data = d_residual.mutable_data<T>(ctx.GetPlace()); } bool transA = false; bool transB = true; bool compute_qkv_bias = qkv_bias ? true : false; auto layer_norm_compute = AttnLayerNorm<T>( ctx.cuda_device_context(), epsilon, bsz_seq, dim_embed); auto qkv_compute = AttnMatMul<T>(ctx.cuda_device_context(), transA, transB, bsz_seq, output_size, input_size, compute_qkv_bias); AttnDropoutParam attn_dropout_param(is_test_1, dropout_implementation_1, attn_dropout_prob, is_upscale_in_train_1, is_fix_seed_1, seed_val_1, seed_1); auto fmha_ref_compute = FMHARef<T>(ctx.cuda_device_context(), batch_size, max_seq_len, num_head, dim_head, attn_dropout_param); output_size = hidden_size; transA = false; transB = false; bool compute_bias = false; // (b*s, num_head * dim_head) * (num_head * dim_head, dim_embed) auto out_linear_compute = AttnMatMul<T>(ctx.cuda_device_context(), transA, transB, bsz_seq, input_size, output_size, compute_bias); DropoutParam dropout_param2(ctx, 0); FusedDropoutLayerNormHelper<T, uint8_t> fused_dropout_layernorm_helper( ctx.cuda_device_context(), bsz_seq, dim_embed, dropout_param2, ln2epsilon); if (pre_layer_norm) { fused_dropout_layernorm_helper.ResidualDropoutBiasGrad( ctx.cuda_device_context(), d_y_data, dropout_mask_out_data, d_out_linear_out_data, d_residual_data, d_out_linear_bias_data); } else { auto *ln_2_mean_data = ln_2_mean->data<U>(); auto *ln_2_var_data = ln_2_var->data<U>(); auto *bias_dropout_residual_out_data = bias_dropout_residual_out->data<T>(); auto *d_ln_2_scale_data = (d_ln_2_scale == nullptr ? nullptr : d_ln_2_scale->mutable_data<U>(ctx.GetPlace())); auto *d_ln_2_bias_data = (d_ln_2_bias == nullptr ? 
nullptr : d_ln_2_bias->mutable_data<U>(ctx.GetPlace())); auto *d_bias_dropout_residual_out_data = d_bias_dropout_residual_out->mutable_data<T>(ctx.GetPlace()); fused_dropout_layernorm_helper.LayernormResidualDropoutBiasGrad( ctx.cuda_device_context(), d_y_data, bias_dropout_residual_out_data, dropout_mask_out_data, ln_2_scale_data, ln_2_mean_data, ln_2_var_data, d_bias_dropout_residual_out_data, d_ln_2_scale_data, d_ln_2_bias_data, d_out_linear_out_data, d_out_linear_bias_data, d_residual_data); } out_linear_compute.ComputeBackward(fmha_out, out_linear_weight, d_out_linear_out, d_fmha_out, d_out_linear_weight, nullptr); if (qkv_bias != nullptr) { fmha_ref_compute.ComputeBackward(*transpose_out_2, src_mask, *softmax_out, *attn_dropout_mask_out, *attn_dropout_out, *qk_out, *src_mask_out, *d_fmha_out, d_qktv_out, d_attn_dropout_out, d_softmax_out, d_src_mask_out, d_qk_out, d_transpose_out_2, nullptr, d_qkv_bias_out); } else { fmha_ref_compute.ComputeBackward(*transpose_out_2, src_mask, *softmax_out, *attn_dropout_mask_out, *attn_dropout_out, *qk_out, *src_mask_out, *d_fmha_out, d_qktv_out, d_attn_dropout_out, d_softmax_out, d_src_mask_out, d_qk_out, d_transpose_out_2, nullptr, d_qkv_out); } if (pre_layer_norm) { auto *ln_mean = ctx.Input<Tensor>("LnMean"); auto *ln_var = ctx.Input<Tensor>("LnVariance"); auto *ln_out = ctx.Input<Tensor>("LnOut"); auto *ln_mean_data = ln_mean->data<U>(); auto *ln_var_data = ln_var->data<U>(); auto *ln_out_data = ln_out->data<T>(); auto *d_ln_out = ctx.Output<Tensor>(framework::GradVarName("LnOut")); auto *d_ln_scale = ctx.Output<Tensor>(framework::GradVarName("LnScale")); auto *d_ln_bias = ctx.Output<Tensor>(framework::GradVarName("LnBias")); auto *d_ln_out_data = d_ln_out->mutable_data<T>(ctx.GetPlace()); auto *d_ln_scale_data = (d_ln_scale == nullptr ? nullptr : d_ln_scale->mutable_data<U>(ctx.GetPlace())); auto *d_ln_bias_data = (d_ln_bias == nullptr ? nullptr : d_ln_bias->mutable_data<U>(ctx.GetPlace())); if (qkv_bias != nullptr) { qkv_compute.ComputeBackward(ln_out, qkv_weight, d_qkv_bias_out, d_ln_out, d_qkv_weight, d_qkv_bias); } else { qkv_compute.ComputeBackward( ln_out, qkv_weight, d_qkv_out, d_ln_out, d_qkv_weight, d_qkv_bias); } // tensor model parallel AllReduce<T>(*d_ln_out, ring_id, ctx.cuda_device_context()); layer_norm_compute.ComputeBackward(x_data, d_ln_out_data, ln_scale_data, ln_mean_data, ln_var_data, d_x_data, d_ln_scale_data, d_ln_bias_data); } else { if (qkv_bias != nullptr) { qkv_compute.ComputeBackward( input_x, qkv_weight, d_qkv_bias_out, d_x, d_qkv_weight, d_qkv_bias); } else { qkv_compute.ComputeBackward( input_x, qkv_weight, d_qkv_out, d_x, d_qkv_weight, d_qkv_bias); } // tensor model parallel AllReduce<T>(*d_x, ring_id, ctx.cuda_device_context()); } if (add_residual) { // gradient accumulation std::vector<const Tensor *> ins = {&d_residual, d_x}; std::vector<Tensor *> outs = {d_x}; phi::funcs::ElementwiseKernel<T>( ctx.cuda_device_context(), ins, &outs, phi::funcs::AddFunctor<T>()); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL(fused_attention, ops::FusedAttentionOpKernel<float>, ops::FusedAttentionOpKernel<double>, ops::FusedAttentionOpKernel<plat::float16>); REGISTER_OP_CUDA_KERNEL(fused_attention_grad, ops::FusedAttentionGradKernel<float>, ops::FusedAttentionGradKernel<double>, ops::FusedAttentionGradKernel<plat::float16>);
5c75bcf1d347a409a4ead09296d0b848eb4581e2.cu
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <cuda_fp16.h> #include <cub/cub.cuh> #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/operators/fused/attention_layer_norm.h" #include "paddle/fluid/operators/fused/attn_gemm.h" #include "paddle/fluid/operators/fused/fmha_ref.h" #include "paddle/fluid/operators/fused/fused_dropout_helper.h" #include "paddle/fluid/platform/device/gpu/gpu_device_function.h" #include "paddle/fluid/platform/device/gpu/gpu_dnn.h" #include "paddle/phi/api/include/tensor.h" #include "paddle/phi/kernels/funcs/broadcast_function.h" #include "paddle/phi/kernels/funcs/elementwise_functor.h" #include "paddle/phi/kernels/funcs/math_function.h" #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) #include "paddle/fluid/distributed/collective/ProcessGroup.h" #include "paddle/fluid/platform/collective_helper.h" #include "paddle/fluid/platform/device/gpu/nccl_helper.h" #endif namespace paddle { namespace operators { using Tensor = framework::Tensor; template <typename T> static void AllReduce(framework::Tensor &tensor, // NOLINT const int ring_id, const phi::GPUContext &ctx) { if (ring_id == -1) return; #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) auto map = paddle::distributed::ProcessGroupMapFromGid::getInstance(); if (map->has(ring_id)) { paddle::distributed::ProcessGroup *pg = map->get(ring_id); std::vector<phi::DenseTensor> in_tensor; std::vector<phi::DenseTensor> out_tensor; in_tensor.push_back(tensor); out_tensor.push_back(tensor); paddle::distributed::AllreduceOptions opts; opts.reduce_op = distributed::ReduceOp::SUM; auto task = pg->AllReduce(in_tensor, out_tensor, opts); task->Wait(); } else { auto dtype = platform::ToNCCLDataType( framework::TransToProtoVarType(tensor.dtype())); int64_t numel = tensor.numel(); const void *sendbuff = tensor.data<T>(); auto place = ctx.GetPlace(); void *recvbuff = tensor.mutable_data<T>(place); auto comm = platform::NCCLCommContext::Instance().Get(ring_id, place); auto stream = ctx.stream(); PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( sendbuff, recvbuff, numel, dtype, ncclSum, comm->comm(), stream)); } #else PADDLE_THROW(platform::errors::Unimplemented( "PaddlePaddle should compile with NCCL or RCCL when used tensor model " "parallel op.")); #endif } template <typename T> class FusedAttentionOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { using U = LayerNormParamType<T>; auto *input_x = ctx.Input<Tensor>("X"); const auto pre_layer_norm = ctx.Attr<bool>("pre_layer_norm"); const float epsilon = ctx.Attr<float>("epsilon"); auto *ln_scale = ctx.Input<Tensor>("LnScale"); auto *ln_bias = ctx.Input<Tensor>("LnBias"); auto *ln_mean = ctx.Output<Tensor>("LnMean"); auto *ln_var = ctx.Output<Tensor>("LnVariance"); auto *ln_out = ctx.Output<Tensor>("LnOut"); // x: qkv's input [batch_size, seq_len, dim_embed] // y: qkv's weight: [3, 
num_head, dim_head, dim_embed] auto *qkv_weight = ctx.Input<Tensor>("QKVW"); auto *qkv_bias = ctx.Input<Tensor>("QKVBias"); auto *qkv_out = ctx.Output<Tensor>("QKVOut"); auto *qkv_bias_out = ctx.Output<Tensor>("QKVBiasOut"); auto *src_mask = ctx.Input<Tensor>("SrcMask"); auto *transpose_out_2 = ctx.Output<Tensor>("TransposeOut2"); auto *cache_kv = ctx.Input<Tensor>("CacheKV"); auto *cache_kv_out = ctx.Output<Tensor>("CacheKVOut"); auto *qk_out = ctx.Output<Tensor>("QKOut"); auto *qktv_out = ctx.Output<Tensor>("QKTVOut"); auto *softmax_out = ctx.Output<Tensor>("SoftmaxOut"); auto *attn_dropout_mask_out = ctx.Output<Tensor>("AttnDropoutMaskOut"); auto *attn_dropout_out = ctx.Output<Tensor>("AttnDropoutOut"); auto *src_mask_out = ctx.Output<Tensor>("SrcMaskOut"); auto *fmha_out = ctx.Output<Tensor>("FMHAOut"); auto *out_linear_weight = ctx.Input<Tensor>("OutLinearW"); auto *out_linear_bias = ctx.Input<Tensor>("OutLinearBias"); auto *out_linear_out = ctx.Output<Tensor>("OutLinearOut"); auto *ln_scale_2 = ctx.Input<Tensor>("Ln2Scale"); auto *ln_bias_2 = ctx.Input<Tensor>("Ln2Bias"); auto *dropout_mask_out = ctx.Output<Tensor>("DropoutMaskOut"); auto *bias_dropout_residual_out = ctx.Output<Tensor>("BiasDropoutResidualOut"); auto *ln_mean_2 = ctx.Output<Tensor>("Ln2Mean"); auto *ln_var_2 = ctx.Output<Tensor>("Ln2Variance"); const float ln_epsilon = ctx.Attr<float>("ln_epsilon"); float attn_dropout_rate = ctx.Attr<float>("attn_dropout_rate"); bool is_test_1 = ctx.Attr<bool>("is_test"); auto &dropout_implementation_1 = ctx.Attr<std::string>("attn_dropout_implementation"); bool is_upscale_in_train_1 = (dropout_implementation_1 == "upscale_in_train"); auto *seed_1 = ctx.HasInput("Seed1") ? ctx.Input<Tensor>("Seed1") : nullptr; bool is_fix_seed_1 = ctx.Attr<bool>("attn_dropout_fix_seed"); int seed_val_1 = ctx.Attr<int>("attn_dropout_seed"); int ring_id = ctx.Attr<int>("ring_id"); // final output. auto *out = ctx.Output<Tensor>("Y"); // get data ptr for qkv part. const auto input_x_dims = input_x->dims(); const auto qkv_w_dims = qkv_weight->dims(); auto *x_data = input_x->data<T>(); auto *qkv_weight_data = qkv_weight->data<T>(); auto *qkv_bias_data = (qkv_bias == nullptr) ? nullptr : qkv_bias->data<T>(); auto *qkv_out_data = qkv_out->mutable_data<T>(ctx.GetPlace()); auto *qkv_bias_out_data = (qkv_bias == nullptr) ? nullptr : qkv_bias_out->mutable_data<T>(ctx.GetPlace()); // get data ptr for FMHA. auto *transpose_out_2_data = transpose_out_2->mutable_data<T>(ctx.GetPlace()); auto *cache_kv_out_data = (cache_kv_out == nullptr) ? nullptr : cache_kv_out->mutable_data<T>(ctx.GetPlace()); auto *qk_out_data = qk_out->mutable_data<T>(ctx.GetPlace()); auto *qktv_out_data = qktv_out->mutable_data<T>(ctx.GetPlace()); auto *src_mask_out_data = (src_mask == nullptr) ? nullptr : src_mask_out->mutable_data<T>(ctx.GetPlace()); auto *softmax_out_data = softmax_out->mutable_data<T>(ctx.GetPlace()); auto *attn_dropout_mask_out_data = attn_dropout_mask_out->mutable_data<uint8_t>(ctx.GetPlace()); auto *attn_dropout_out_data = attn_dropout_out->mutable_data<T>(ctx.GetPlace()); auto *fmha_out_data = fmha_out->mutable_data<T>(ctx.GetPlace()); // get data ptr for out_linear. auto *out_linear_weight_data = out_linear_weight->data<T>(); auto *out_linear_bias_data = (out_linear_bias == nullptr) ? 
nullptr : out_linear_bias->data<T>(); auto *out_linear_out_data = out_linear_out->mutable_data<T>(ctx.GetPlace()); // get data ptr for bias+dropout+residual+layernorm auto *dropout_mask_out_data = dropout_mask_out->mutable_data<uint8_t>(ctx.GetPlace()); auto *final_out_data = out->mutable_data<T>(ctx.GetPlace()); int batch_size = input_x_dims[0]; int max_seq_len = input_x_dims[1]; int dim_embed = input_x_dims[2]; int num_head = qkv_w_dims[1]; int dim_head = qkv_w_dims[2]; int bsz_seq = batch_size * max_seq_len; int hidden_size = num_head * dim_head; int output_size = 3 * hidden_size; int input_size = dim_embed; auto layer_norm_compute = AttnLayerNorm<T>( ctx.cuda_device_context(), epsilon, bsz_seq, dim_embed); bool compute_bias = true; if (qkv_bias == nullptr) { compute_bias = false; } // (transA, transB, compute_bias) = (false, true, true) auto qkv_compute = AttnMatMul<T>(ctx.cuda_device_context(), false, true, bsz_seq, output_size, input_size, compute_bias); AttnDropoutParam attn_dropout_param(is_test_1, dropout_implementation_1, attn_dropout_rate, is_upscale_in_train_1, is_fix_seed_1, seed_val_1, seed_1); auto fmha_ref_compute = FMHARef<T>(ctx.cuda_device_context(), batch_size, max_seq_len, num_head, dim_head, attn_dropout_param); output_size = hidden_size; // (transA, transB, compute_bias) = (false, false, false) // NOTE(Yuang Liu): For general input size == output size, change the // position won't have effects. For mp, the output size is mp_head * dkey // which is actually the input size. While the input size is hidden size, // which is actually the output size. So for out linear, switch the // input size and output size. auto out_linear_compute = AttnMatMul<T>(ctx.cuda_device_context(), false, false, bsz_seq, input_size, output_size, false); DropoutParam dropout_param2(ctx, 0); FusedDropoutLayerNormHelper<T, uint8_t> fused_dropout_layernorm_helper( ctx.cuda_device_context(), bsz_seq, dim_embed, dropout_param2, ln_epsilon); if (pre_layer_norm) { auto *ln_scale_data = (ln_scale == nullptr ? nullptr : ln_scale->data<U>()); auto *ln_bias_data = (ln_bias == nullptr ? nullptr : ln_bias->data<U>()); auto *ln_mean_data = ln_mean->mutable_data<U>(ctx.GetPlace()); auto *ln_var_data = ln_var->mutable_data<U>(ctx.GetPlace()); auto *ln_out_data = ln_out->mutable_data<T>(ctx.GetPlace()); layer_norm_compute.ComputeForward(x_data, ln_scale_data, ln_bias_data, ln_out_data, ln_mean_data, ln_var_data); qkv_compute.ComputeForward( qkv_weight, ln_out, qkv_bias, qkv_out, qkv_bias_out); } else { qkv_compute.ComputeForward( qkv_weight, input_x, qkv_bias, qkv_out, qkv_bias_out); } if (qkv_bias == nullptr) { fmha_ref_compute.ComputeForward(*qkv_out, cache_kv, src_mask, transpose_out_2, cache_kv_out, qk_out, src_mask_out, softmax_out, attn_dropout_mask_out, attn_dropout_out, qktv_out, fmha_out); } else { fmha_ref_compute.ComputeForward(*qkv_bias_out, cache_kv, src_mask, transpose_out_2, cache_kv_out, qk_out, src_mask_out, softmax_out, attn_dropout_mask_out, attn_dropout_out, qktv_out, fmha_out); } // fmha_out: [batch_size, seq_len, num_head, head_dim] // weight: [embed_dim, embed_dim] // out_linear_out: [batch_size, seq_len, embed_dim] out_linear_compute.ComputeForward( out_linear_weight, fmha_out, nullptr, out_linear_out, nullptr); // tensor model parallel AllReduce<T>(*out_linear_out, ring_id, ctx.cuda_device_context()); bool add_residual = ctx.Attr<bool>("add_residual"); const T *residual_ptr = add_residual ? 
x_data : nullptr; if (pre_layer_norm) { // output = (residual + dropout(input + bias)) fused_dropout_layernorm_helper.ResidualDropoutBias( ctx.cuda_device_context(), out_linear_out_data, residual_ptr, out_linear_bias_data, final_out_data, dropout_mask_out_data); } else { // TODO(Xreki): support post layer_norm case when add_residual is false. PADDLE_ENFORCE_EQ(add_residual, true, platform::errors::InvalidArgument( "Attribute add_residual is expected to be true " "when pre_layer_norm is false.")); const U *ln_scale_2_ptr = ln_scale_2 ? ln_scale_2->data<U>() : nullptr; const U *ln_bias_2_ptr = ln_bias_2 ? ln_bias_2->data<U>() : nullptr; T *bias_dropout_residual_out_ptr = bias_dropout_residual_out->mutable_data<T>(ctx.GetPlace()); U *ln_mean_2_ptr = ln_mean_2->mutable_data<U>(ctx.GetPlace()); U *ln_var_2_ptr = ln_var_2->mutable_data<U>(ctx.GetPlace()); // output = layernorm(residual + dropout(input + bias)) fused_dropout_layernorm_helper.LayernormResidualDropoutBias( ctx.cuda_device_context(), out_linear_out_data, residual_ptr, out_linear_bias_data, ln_scale_2_ptr, ln_bias_2_ptr, bias_dropout_residual_out_ptr, dropout_mask_out_data, final_out_data, ln_mean_2_ptr, ln_var_2_ptr); } } }; template <typename T> class FusedAttentionGradKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { using U = LayerNormParamType<T>; const auto pre_layer_norm = ctx.Attr<bool>("pre_layer_norm"); const float epsilon = ctx.Attr<float>("epsilon"); const float ln2epsilon = ctx.Attr<float>("ln_epsilon"); float attn_dropout_prob = ctx.Attr<float>("attn_dropout_rate"); bool is_test_1 = ctx.Attr<bool>("is_test"); auto &dropout_implementation_1 = ctx.Attr<std::string>("attn_dropout_implementation"); bool is_upscale_in_train_1 = (dropout_implementation_1 == "upscale_in_train"); auto *seed_1 = ctx.HasInput("Seed1") ? ctx.Input<Tensor>("Seed1") : nullptr; bool is_fix_seed_1 = ctx.Attr<bool>("attn_dropout_fix_seed"); int seed_val_1 = ctx.Attr<int>("attn_dropout_seed"); int ring_id = ctx.Attr<int>("ring_id"); // get inputs. auto *d_y = ctx.Input<Tensor>(framework::GradVarName("Y")); auto *d_y_data = d_y->data<T>(); // fw input auto *input_x = ctx.Input<Tensor>("X"); auto *ln_scale = ctx.Input<Tensor>("LnScale"); auto *ln_2_scale = ctx.Input<Tensor>("Ln2Scale"); auto *x_data = input_x->data<T>(); auto *ln_scale_data = (ln_scale == nullptr ? nullptr : ln_scale->data<U>()); auto *ln_2_scale_data = (ln_2_scale == nullptr ? nullptr : ln_2_scale->data<U>()); // fw parameters. auto *src_mask = ctx.Input<Tensor>("SrcMask"); auto *qkv_weight = ctx.Input<Tensor>("QKVW"); auto *qkv_bias = ctx.Input<Tensor>("QKVBias"); auto *out_linear_weight = ctx.Input<Tensor>("OutLinearW"); auto *out_linear_bias = ctx.Input<Tensor>("OutLinearBias"); auto *src_mask_data = (src_mask == nullptr ? nullptr : src_mask->data<T>()); auto *qkv_weight_data = qkv_weight->data<T>(); auto *qkv_bias_data = (qkv_bias == nullptr) ? nullptr : qkv_bias->data<T>(); auto *out_linear_weight_data = out_linear_weight->data<T>(); auto *out_linear_bias_data = (out_linear_bias == nullptr) ? 
nullptr : out_linear_bias->data<T>(); // fw output auto *fmha_out = ctx.Input<Tensor>("FMHAOut"); auto *transpose_out_2 = ctx.Input<Tensor>("TransposeOut2"); auto *qk_out = ctx.Input<Tensor>("QKOut"); auto *qktv_out = ctx.Input<Tensor>("QKTVOut"); auto *softmax_out = ctx.Input<Tensor>("SoftmaxOut"); auto *attn_dropout_mask_out = ctx.Input<Tensor>("AttnDropoutMaskOut"); auto *attn_dropout_out = ctx.Input<Tensor>("AttnDropoutOut"); auto *src_mask_out = ctx.Input<Tensor>("SrcMaskOut"); auto *out_linear_out = ctx.Input<Tensor>("OutLinearOut"); auto *ln_2_mean = ctx.Input<Tensor>("Ln2Mean"); auto *ln_2_var = ctx.Input<Tensor>("Ln2Variance"); auto *dropout_mask_out = ctx.Input<Tensor>("DropoutMaskOut"); auto *bias_dropout_residual_out = ctx.Input<Tensor>("BiasDropoutResidualOut"); auto *fmha_out_data = fmha_out->data<T>(); auto *transpose_out_2_data = transpose_out_2->data<T>(); auto *qk_out_data = qk_out->data<T>(); auto *qktv_out_data = qktv_out->data<T>(); auto *softmax_out_data = softmax_out->data<T>(); auto *src_mask_out_data = (src_mask == nullptr) ? nullptr : src_mask_out->data<T>(); auto *out_linear_out_data = out_linear_out->data<T>(); auto *dropout_mask_out_data = dropout_mask_out->data<uint8_t>(); // output's grad auto *d_x = ctx.Output<Tensor>(framework::GradVarName("X")); auto *d_qkv_out = ctx.Output<Tensor>(framework::GradVarName("QKVOut")); auto *d_qkv_bias_out = ctx.Output<Tensor>(framework::GradVarName("QKVBiasOut")); auto *d_qktv_out = ctx.Output<Tensor>(framework::GradVarName("QKTVOut")); auto *d_transpose_out_2 = ctx.Output<Tensor>(framework::GradVarName("TransposeOut2")); auto *d_qk_out = ctx.Output<Tensor>(framework::GradVarName("QKOut")); auto *d_softmax_out = ctx.Output<Tensor>(framework::GradVarName("SoftmaxOut")); auto *d_attn_dropout_out = ctx.Output<Tensor>(framework::GradVarName("AttnDropoutOut")); auto *d_src_mask_out = ctx.Output<Tensor>(framework::GradVarName("SrcMaskOut")); auto *d_fmha_out = ctx.Output<Tensor>(framework::GradVarName("FMHAOut")); auto *d_out_linear_out = ctx.Output<Tensor>(framework::GradVarName("OutLinearOut")); auto *d_bias_dropout_residual_out = ctx.Output<Tensor>(framework::GradVarName("BiasDropoutResidualOut")); auto *d_x_data = d_x->mutable_data<T>(ctx.GetPlace()); // when qkv_bias is not nullptr, d_qkv_out is equals to d_qkv_bias_out, the // space can be reused. auto *d_qkv_out_data = (d_qkv_bias_out != nullptr) ? nullptr : d_qkv_out->mutable_data<T>(ctx.GetPlace()); auto *d_qkv_bias_out_data = (d_qkv_bias_out == nullptr) ? nullptr : d_qkv_bias_out->mutable_data<T>(ctx.GetPlace()); auto *d_qktv_out_data = d_qktv_out->mutable_data<T>(ctx.GetPlace()); auto *d_transpose_out_2_data = d_transpose_out_2->mutable_data<T>(ctx.GetPlace()); auto *d_qk_out_data = d_qk_out->mutable_data<T>(ctx.GetPlace()); auto *d_softmax_out_data = d_softmax_out->mutable_data<T>(ctx.GetPlace()); auto *d_attn_dropout_out_data = d_attn_dropout_out->mutable_data<T>(ctx.GetPlace()); auto *d_src_mask_out_data = (src_mask == nullptr) ? 
nullptr : d_src_mask_out->mutable_data<T>(ctx.GetPlace()); auto *d_fmha_out_data = d_fmha_out->mutable_data<T>(ctx.GetPlace()); auto *d_out_linear_out_data = d_out_linear_out->mutable_data<T>(ctx.GetPlace()); // parameter grad auto *d_qkv_weight = ctx.Output<Tensor>(framework::GradVarName("QKVW")); auto *d_qkv_bias = ctx.Output<Tensor>(framework::GradVarName("QKVBias")); auto *d_out_linear_weight = ctx.Output<Tensor>(framework::GradVarName("OutLinearW")); auto *d_out_linear_bias = ctx.Output<Tensor>(framework::GradVarName("OutLinearBias")); auto *d_ln_2_scale = ctx.Output<Tensor>(framework::GradVarName("Ln2Scale")); auto *d_ln_2_bias = ctx.Output<Tensor>(framework::GradVarName("Ln2Bias")); auto *d_qkv_weight_data = d_qkv_weight->mutable_data<T>(ctx.GetPlace()); auto *d_qkv_bias_data = (d_qkv_bias == nullptr) ? nullptr : d_qkv_bias->mutable_data<T>(ctx.GetPlace()); auto *d_out_linear_weight_data = d_out_linear_weight->mutable_data<T>(ctx.GetPlace()); auto *d_out_linear_bias_data = (d_out_linear_bias == nullptr) ? nullptr : d_out_linear_bias->mutable_data<T>(ctx.GetPlace()); const auto input_x_dims = input_x->dims(); const auto qkv_w_dims = qkv_weight->dims(); int batch_size = input_x_dims[0]; int max_seq_len = input_x_dims[1]; int dim_embed = input_x_dims[2]; int num_head = qkv_w_dims[1]; int dim_head = qkv_w_dims[2]; int bsz_seq = batch_size * max_seq_len; int hidden_size = num_head * dim_head; int output_size = 3 * hidden_size; int input_size = dim_embed; bool add_residual = ctx.Attr<bool>("add_residual"); Tensor d_residual; T *d_residual_data = nullptr; if (add_residual) { d_residual.Resize(input_x_dims); d_residual_data = d_residual.mutable_data<T>(ctx.GetPlace()); } bool transA = false; bool transB = true; bool compute_qkv_bias = qkv_bias ? true : false; auto layer_norm_compute = AttnLayerNorm<T>( ctx.cuda_device_context(), epsilon, bsz_seq, dim_embed); auto qkv_compute = AttnMatMul<T>(ctx.cuda_device_context(), transA, transB, bsz_seq, output_size, input_size, compute_qkv_bias); AttnDropoutParam attn_dropout_param(is_test_1, dropout_implementation_1, attn_dropout_prob, is_upscale_in_train_1, is_fix_seed_1, seed_val_1, seed_1); auto fmha_ref_compute = FMHARef<T>(ctx.cuda_device_context(), batch_size, max_seq_len, num_head, dim_head, attn_dropout_param); output_size = hidden_size; transA = false; transB = false; bool compute_bias = false; // (b*s, num_head * dim_head) * (num_head * dim_head, dim_embed) auto out_linear_compute = AttnMatMul<T>(ctx.cuda_device_context(), transA, transB, bsz_seq, input_size, output_size, compute_bias); DropoutParam dropout_param2(ctx, 0); FusedDropoutLayerNormHelper<T, uint8_t> fused_dropout_layernorm_helper( ctx.cuda_device_context(), bsz_seq, dim_embed, dropout_param2, ln2epsilon); if (pre_layer_norm) { fused_dropout_layernorm_helper.ResidualDropoutBiasGrad( ctx.cuda_device_context(), d_y_data, dropout_mask_out_data, d_out_linear_out_data, d_residual_data, d_out_linear_bias_data); } else { auto *ln_2_mean_data = ln_2_mean->data<U>(); auto *ln_2_var_data = ln_2_var->data<U>(); auto *bias_dropout_residual_out_data = bias_dropout_residual_out->data<T>(); auto *d_ln_2_scale_data = (d_ln_2_scale == nullptr ? nullptr : d_ln_2_scale->mutable_data<U>(ctx.GetPlace())); auto *d_ln_2_bias_data = (d_ln_2_bias == nullptr ? 
nullptr : d_ln_2_bias->mutable_data<U>(ctx.GetPlace())); auto *d_bias_dropout_residual_out_data = d_bias_dropout_residual_out->mutable_data<T>(ctx.GetPlace()); fused_dropout_layernorm_helper.LayernormResidualDropoutBiasGrad( ctx.cuda_device_context(), d_y_data, bias_dropout_residual_out_data, dropout_mask_out_data, ln_2_scale_data, ln_2_mean_data, ln_2_var_data, d_bias_dropout_residual_out_data, d_ln_2_scale_data, d_ln_2_bias_data, d_out_linear_out_data, d_out_linear_bias_data, d_residual_data); } out_linear_compute.ComputeBackward(fmha_out, out_linear_weight, d_out_linear_out, d_fmha_out, d_out_linear_weight, nullptr); if (qkv_bias != nullptr) { fmha_ref_compute.ComputeBackward(*transpose_out_2, src_mask, *softmax_out, *attn_dropout_mask_out, *attn_dropout_out, *qk_out, *src_mask_out, *d_fmha_out, d_qktv_out, d_attn_dropout_out, d_softmax_out, d_src_mask_out, d_qk_out, d_transpose_out_2, nullptr, d_qkv_bias_out); } else { fmha_ref_compute.ComputeBackward(*transpose_out_2, src_mask, *softmax_out, *attn_dropout_mask_out, *attn_dropout_out, *qk_out, *src_mask_out, *d_fmha_out, d_qktv_out, d_attn_dropout_out, d_softmax_out, d_src_mask_out, d_qk_out, d_transpose_out_2, nullptr, d_qkv_out); } if (pre_layer_norm) { auto *ln_mean = ctx.Input<Tensor>("LnMean"); auto *ln_var = ctx.Input<Tensor>("LnVariance"); auto *ln_out = ctx.Input<Tensor>("LnOut"); auto *ln_mean_data = ln_mean->data<U>(); auto *ln_var_data = ln_var->data<U>(); auto *ln_out_data = ln_out->data<T>(); auto *d_ln_out = ctx.Output<Tensor>(framework::GradVarName("LnOut")); auto *d_ln_scale = ctx.Output<Tensor>(framework::GradVarName("LnScale")); auto *d_ln_bias = ctx.Output<Tensor>(framework::GradVarName("LnBias")); auto *d_ln_out_data = d_ln_out->mutable_data<T>(ctx.GetPlace()); auto *d_ln_scale_data = (d_ln_scale == nullptr ? nullptr : d_ln_scale->mutable_data<U>(ctx.GetPlace())); auto *d_ln_bias_data = (d_ln_bias == nullptr ? nullptr : d_ln_bias->mutable_data<U>(ctx.GetPlace())); if (qkv_bias != nullptr) { qkv_compute.ComputeBackward(ln_out, qkv_weight, d_qkv_bias_out, d_ln_out, d_qkv_weight, d_qkv_bias); } else { qkv_compute.ComputeBackward( ln_out, qkv_weight, d_qkv_out, d_ln_out, d_qkv_weight, d_qkv_bias); } // tensor model parallel AllReduce<T>(*d_ln_out, ring_id, ctx.cuda_device_context()); layer_norm_compute.ComputeBackward(x_data, d_ln_out_data, ln_scale_data, ln_mean_data, ln_var_data, d_x_data, d_ln_scale_data, d_ln_bias_data); } else { if (qkv_bias != nullptr) { qkv_compute.ComputeBackward( input_x, qkv_weight, d_qkv_bias_out, d_x, d_qkv_weight, d_qkv_bias); } else { qkv_compute.ComputeBackward( input_x, qkv_weight, d_qkv_out, d_x, d_qkv_weight, d_qkv_bias); } // tensor model parallel AllReduce<T>(*d_x, ring_id, ctx.cuda_device_context()); } if (add_residual) { // gradient accumulation std::vector<const Tensor *> ins = {&d_residual, d_x}; std::vector<Tensor *> outs = {d_x}; phi::funcs::ElementwiseKernel<T>( ctx.cuda_device_context(), ins, &outs, phi::funcs::AddFunctor<T>()); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL(fused_attention, ops::FusedAttentionOpKernel<float>, ops::FusedAttentionOpKernel<double>, ops::FusedAttentionOpKernel<plat::float16>); REGISTER_OP_CUDA_KERNEL(fused_attention_grad, ops::FusedAttentionGradKernel<float>, ops::FusedAttentionGradKernel<double>, ops::FusedAttentionGradKernel<plat::float16>);
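The FusedAttentionOpKernel above delegates its epilogue to FusedDropoutLayerNormHelper; per the inline comments, the post-layer-norm path computes output = layernorm(residual + dropout(input + bias)). Below is a naive, unfused reference sketch of that composition (one block per row, dynamic shared memory sized to one row, a precomputed keep-mask instead of on-the-fly RNG). All names and the layout are illustrative assumptions, not part of the Paddle implementation.

#include <cstdint>
#include <cuda_runtime.h>

// Reference only: out[row] = gamma * (z - mean(z)) / sqrt(var(z) + eps) + beta,
// where z = residual + keep_mask * (x + bias) * dropout_scale.
__global__ void bias_dropout_residual_layernorm_ref(
    const float* x, const float* bias, const float* residual,
    const uint8_t* keep_mask, const float* gamma, const float* beta,
    float* out, int cols, float dropout_scale, float eps) {
  int row = blockIdx.x;
  extern __shared__ float z[];  // one row of intermediate values
  for (int c = threadIdx.x; c < cols; c += blockDim.x) {
    int i = row * cols + c;
    float v = (x[i] + bias[c]) * (keep_mask[i] ? dropout_scale : 0.f);
    z[c] = residual[i] + v;
  }
  __syncthreads();
  __shared__ float mean, var;
  if (threadIdx.x == 0) {  // serial reduction, for clarity rather than speed
    float s = 0.f, sq = 0.f;
    for (int c = 0; c < cols; ++c) { s += z[c]; sq += z[c] * z[c]; }
    mean = s / cols;
    var = sq / cols - mean * mean;
  }
  __syncthreads();
  float inv_std = rsqrtf(var + eps);
  for (int c = threadIdx.x; c < cols; c += blockDim.x) {
    out[row * cols + c] = gamma[c] * (z[c] - mean) * inv_std + beta[c];
  }
}
// Launch sketch (hypothetical sizes):
//   bias_dropout_residual_layernorm_ref<<<rows, 256, cols * sizeof(float)>>>(
//       x, bias, residual, mask, gamma, beta, out, cols, 1.f / (1.f - p), 1e-5f);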
ae2ae0f54005d76aec14ba1f69187e0aef5b356d.hip
// !!! This is a file automatically generated by hipify!!! #pragma once #ifndef LATTICE_CU #define LATTICE_CU #include "Lattice_thrust.h" latticed3q19::latticed3q19(int width, int height, int depth, float tau) { _width = width; _height = height; _depth = depth; _tau = tau; _stride = 19; _numberLatticeElements = _width * _height * _depth; _numberAllElements = _stride * _numberLatticeElements; _c = 1 / sqrtf(3.0f); initThrust(); } latticed3q19::~latticed3q19() { } void latticed3q19::step(void) { hipEvent_t start, stop; float time; hipEventCreate(&start); hipEventCreate(&stop); latticeSolidIndexes_d = latticeSolidIndexes_h; velocityVector_d = velocityVector_h; hipEventRecord(start, 0); stream(); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); printf("\nTime for stream: %f ms\n", time); hipEventRecord(start, 0); collide(); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); printf("Time for collide: %f ms\n", time); velocityVector_h = velocityVector_d; } void latticed3q19::initThrust() { for (unsigned int i = 0; i < 19 * 3; i++) { speedDirection_d.push_back(speedDirection_in[i]); } speedDirection_d_ptr = thrust::raw_pointer_cast(speedDirection_d.data()); for (unsigned int i = 0; i < 19; i++) { latticeWeights_d.push_back(latticeWeights_in[i]); } latticeWeights_d_ptr = thrust::raw_pointer_cast(latticeWeights_d.data()); try { f_d = thrust::device_vector<float>(_numberAllElements, 0.0f); } catch (thrust::system_error& e) { std::cerr << "Error: " << e.what() << std::endl; } for (unsigned int k = 0; k < _depth; k++) { for (unsigned int j = 0; j < _height; j++) { for (unsigned int i = 0; i < _width; i++) { for (unsigned int w = 0; w < 19; w++) { //latticeIndexes_h.push_back(make_uint4(i, j, k, w)); latticeIndexes_hi.push_back(i); latticeIndexes_hj.push_back(j); latticeIndexes_hk.push_back(k); latticeIndexes_hw.push_back(w); } } } } latticeIndexes_di = latticeIndexes_hi; latticeIndexes_dj = latticeIndexes_hj; latticeIndexes_dk = latticeIndexes_hk; latticeIndexes_dw = latticeIndexes_hw; ftemp_d = thrust::device_vector<float>(_numberAllElements, 0.0f); latticeSolidIndexes_h = thrust::host_vector<unsigned int>(_numberLatticeElements, 0); latticeSolidIndexes_d = thrust::device_vector<unsigned int>(_numberLatticeElements, 0); velocityVector_h = thrust::host_vector<float>(_numberLatticeElements * 3, 0.0f); velocityVector_d = thrust::device_vector<float>(_numberLatticeElements * 3, 0.0f); latticeWeights_d = latticeWeights_h; } void latticed3q19::stream() { float* f_d_ptr = thrust::raw_pointer_cast(f_d.data()); float* ftemp_d_ptr = thrust::raw_pointer_cast(ftemp_d.data()); unsigned int* lsi_ptr = thrust::raw_pointer_cast(latticeSolidIndexes_d.data()); thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( latticeIndexes_di.begin(), latticeIndexes_dj.begin(), latticeIndexes_dk.begin(), latticeIndexes_dw.begin() ) ), thrust::make_zip_iterator( thrust::make_tuple( latticeIndexes_di.end(), latticeIndexes_dj.end(), latticeIndexes_dk.end(), latticeIndexes_dw.end() ) ), latticeStream( f_d_ptr, ftemp_d_ptr, speedDirection_d_ptr, _width, _height, _depth, _stride, lsi_ptr ) ); thrust::copy(ftemp_d.begin(), ftemp_d.end(), f_d.begin()); } void latticed3q19::collide(void) { float* f_d_ptr = thrust::raw_pointer_cast(f_d.data()); float* vv_ptr = thrust::raw_pointer_cast(velocityVector_d.data()); unsigned int* lsi_ptr = thrust::raw_pointer_cast(latticeSolidIndexes_d.data()); thrust::for_each( thrust::make_zip_iterator( 
thrust::make_tuple( latticeIndexes_di.begin(), latticeIndexes_dj.begin(), latticeIndexes_dk.begin(), latticeIndexes_dw.begin() ) ), thrust::make_zip_iterator( thrust::make_tuple( latticeIndexes_di.end(), latticeIndexes_dj.end(), latticeIndexes_dk.end(), latticeIndexes_dw.end() ) ), latticeCollide( f_d_ptr, vv_ptr, speedDirection_d_ptr, latticeWeights_d_ptr, _width, _height, _stride, _tau, lsi_ptr ) ); } void latticed3q19::calculateInEquilibriumFunction(float3 _inVector, float inRo) { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( latticeIndexes_di.begin(), latticeIndexes_dj.begin(), latticeIndexes_dk.begin(), latticeIndexes_dw.begin() ) ), thrust::make_zip_iterator( thrust::make_tuple( latticeIndexes_di.end(), latticeIndexes_dj.end(), latticeIndexes_dk.end(), latticeIndexes_dw.end() ) ), latticeInEq( thrust::raw_pointer_cast(f_d.data()), thrust::raw_pointer_cast(ftemp_d.data()), _inVector, _width, _height, _stride, inRo, _c ) ); } #endif
ae2ae0f54005d76aec14ba1f69187e0aef5b356d.cu
#pragma once #ifndef LATTICE_CU #define LATTICE_CU #include "Lattice_thrust.h" latticed3q19::latticed3q19(int width, int height, int depth, float tau) { _width = width; _height = height; _depth = depth; _tau = tau; _stride = 19; _numberLatticeElements = _width * _height * _depth; _numberAllElements = _stride * _numberLatticeElements; _c = 1 / sqrtf(3.0f); initThrust(); } latticed3q19::~latticed3q19() { } void latticed3q19::step(void) { cudaEvent_t start, stop; float time; cudaEventCreate(&start); cudaEventCreate(&stop); latticeSolidIndexes_d = latticeSolidIndexes_h; velocityVector_d = velocityVector_h; cudaEventRecord(start, 0); stream(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); printf("\nTime for stream: %f ms\n", time); cudaEventRecord(start, 0); collide(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); printf("Time for collide: %f ms\n", time); velocityVector_h = velocityVector_d; } void latticed3q19::initThrust() { for (unsigned int i = 0; i < 19 * 3; i++) { speedDirection_d.push_back(speedDirection_in[i]); } speedDirection_d_ptr = thrust::raw_pointer_cast(speedDirection_d.data()); for (unsigned int i = 0; i < 19; i++) { latticeWeights_d.push_back(latticeWeights_in[i]); } latticeWeights_d_ptr = thrust::raw_pointer_cast(latticeWeights_d.data()); try { f_d = thrust::device_vector<float>(_numberAllElements, 0.0f); } catch (thrust::system_error& e) { std::cerr << "Error: " << e.what() << std::endl; } for (unsigned int k = 0; k < _depth; k++) { for (unsigned int j = 0; j < _height; j++) { for (unsigned int i = 0; i < _width; i++) { for (unsigned int w = 0; w < 19; w++) { //latticeIndexes_h.push_back(make_uint4(i, j, k, w)); latticeIndexes_hi.push_back(i); latticeIndexes_hj.push_back(j); latticeIndexes_hk.push_back(k); latticeIndexes_hw.push_back(w); } } } } latticeIndexes_di = latticeIndexes_hi; latticeIndexes_dj = latticeIndexes_hj; latticeIndexes_dk = latticeIndexes_hk; latticeIndexes_dw = latticeIndexes_hw; ftemp_d = thrust::device_vector<float>(_numberAllElements, 0.0f); latticeSolidIndexes_h = thrust::host_vector<unsigned int>(_numberLatticeElements, 0); latticeSolidIndexes_d = thrust::device_vector<unsigned int>(_numberLatticeElements, 0); velocityVector_h = thrust::host_vector<float>(_numberLatticeElements * 3, 0.0f); velocityVector_d = thrust::device_vector<float>(_numberLatticeElements * 3, 0.0f); latticeWeights_d = latticeWeights_h; } void latticed3q19::stream() { float* f_d_ptr = thrust::raw_pointer_cast(f_d.data()); float* ftemp_d_ptr = thrust::raw_pointer_cast(ftemp_d.data()); unsigned int* lsi_ptr = thrust::raw_pointer_cast(latticeSolidIndexes_d.data()); thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( latticeIndexes_di.begin(), latticeIndexes_dj.begin(), latticeIndexes_dk.begin(), latticeIndexes_dw.begin() ) ), thrust::make_zip_iterator( thrust::make_tuple( latticeIndexes_di.end(), latticeIndexes_dj.end(), latticeIndexes_dk.end(), latticeIndexes_dw.end() ) ), latticeStream( f_d_ptr, ftemp_d_ptr, speedDirection_d_ptr, _width, _height, _depth, _stride, lsi_ptr ) ); thrust::copy(ftemp_d.begin(), ftemp_d.end(), f_d.begin()); } void latticed3q19::collide(void) { float* f_d_ptr = thrust::raw_pointer_cast(f_d.data()); float* vv_ptr = thrust::raw_pointer_cast(velocityVector_d.data()); unsigned int* lsi_ptr = thrust::raw_pointer_cast(latticeSolidIndexes_d.data()); thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( latticeIndexes_di.begin(), 
latticeIndexes_dj.begin(), latticeIndexes_dk.begin(), latticeIndexes_dw.begin() ) ), thrust::make_zip_iterator( thrust::make_tuple( latticeIndexes_di.end(), latticeIndexes_dj.end(), latticeIndexes_dk.end(), latticeIndexes_dw.end() ) ), latticeCollide( f_d_ptr, vv_ptr, speedDirection_d_ptr, latticeWeights_d_ptr, _width, _height, _stride, _tau, lsi_ptr ) ); } void latticed3q19::calculateInEquilibriumFunction(float3 _inVector, float inRo) { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( latticeIndexes_di.begin(), latticeIndexes_dj.begin(), latticeIndexes_dk.begin(), latticeIndexes_dw.begin() ) ), thrust::make_zip_iterator( thrust::make_tuple( latticeIndexes_di.end(), latticeIndexes_dj.end(), latticeIndexes_dk.end(), latticeIndexes_dw.end() ) ), latticeInEq( thrust::raw_pointer_cast(f_d.data()), thrust::raw_pointer_cast(ftemp_d.data()), _inVector, _width, _height, _stride, inRo, _c ) ); } #endif
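Both Lattice files drive their kernels through thrust::for_each over a zip_iterator of four per-site index vectors rather than a raw kernel launch. The self-contained sketch below shows that pattern in isolation; visit_site and the toy sizes are illustrative stand-ins for latticeStream/latticeCollide, not code from the files above.

#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/for_each.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/tuple.h>
#include <cstdio>

// Receives one (i, j, k, w) tuple per cell/direction and writes to the flat
// D3Q19-style index, mirroring how the functors above are driven.
struct visit_site {
  float* f;
  unsigned int width, height, stride;
  __host__ __device__ void operator()(
      thrust::tuple<unsigned int, unsigned int, unsigned int, unsigned int> t) const {
    unsigned int i = thrust::get<0>(t), j = thrust::get<1>(t);
    unsigned int k = thrust::get<2>(t), w = thrust::get<3>(t);
    unsigned int idx = (k * height * width + j * width + i) * stride + w;
    f[idx] = static_cast<float>(w);  // a real functor would stream/collide here
  }
};

int main() {
  const unsigned int W = 4, H = 4, D = 4, Q = 19;
  thrust::host_vector<unsigned int> hi, hj, hk, hw;
  for (unsigned int k = 0; k < D; ++k)
    for (unsigned int j = 0; j < H; ++j)
      for (unsigned int i = 0; i < W; ++i)
        for (unsigned int w = 0; w < Q; ++w) {
          hi.push_back(i); hj.push_back(j); hk.push_back(k); hw.push_back(w);
        }
  thrust::device_vector<unsigned int> di = hi, dj = hj, dk = hk, dw = hw;
  thrust::device_vector<float> f(W * H * D * Q, 0.0f);

  auto first = thrust::make_zip_iterator(
      thrust::make_tuple(di.begin(), dj.begin(), dk.begin(), dw.begin()));
  auto last = thrust::make_zip_iterator(
      thrust::make_tuple(di.end(), dj.end(), dk.end(), dw.end()));
  thrust::for_each(first, last,
                   visit_site{thrust::raw_pointer_cast(f.data()), W, H, Q});
  std::printf("f[37] = %f\n", static_cast<float>(f[37]));
  return 0;
}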
97e750d14741f017cb208cd2940ab11ec3bdf54e.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cleanup_heights.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            const double *Params = NULL;
            hipMalloc(&Params, XSIZE*YSIZE);
            const float *x = NULL;
            hipMalloc(&x, XSIZE*YSIZE);
            const int *st = NULL;
            hipMalloc(&st, XSIZE*YSIZE);
            const int *id = NULL;
            hipMalloc(&id, XSIZE*YSIZE);
            int *st1 = NULL;
            hipMalloc(&st1, XSIZE*YSIZE);
            int *id1 = NULL;
            hipMalloc(&id1, XSIZE*YSIZE);
            int *counter = NULL;
            hipMalloc(&counter, XSIZE*YSIZE);
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL((cleanup_heights), dim3(gridBlock), dim3(threadBlock), 0, 0, Params, x, st, id, st1, id1, counter);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL((cleanup_heights), dim3(gridBlock), dim3(threadBlock), 0, 0, Params, x, st, id, st1, id1, counter);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL((cleanup_heights), dim3(gridBlock), dim3(threadBlock), 0, 0, Params, x, st, id, st1, id1, counter);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
97e750d14741f017cb208cd2940ab11ec3bdf54e.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cleanup_heights.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            const double *Params = NULL;
            cudaMalloc(&Params, XSIZE*YSIZE);
            const float *x = NULL;
            cudaMalloc(&x, XSIZE*YSIZE);
            const int *st = NULL;
            cudaMalloc(&st, XSIZE*YSIZE);
            const int *id = NULL;
            cudaMalloc(&id, XSIZE*YSIZE);
            int *st1 = NULL;
            cudaMalloc(&st1, XSIZE*YSIZE);
            int *id1 = NULL;
            cudaMalloc(&id1, XSIZE*YSIZE);
            int *counter = NULL;
            cudaMalloc(&counter, XSIZE*YSIZE);
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            cleanup_heights<<<gridBlock,threadBlock>>>(Params, x, st, id, st1, id1, counter);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                cleanup_heights<<<gridBlock,threadBlock>>>(Params, x, st, id, st1, id1, counter);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                cleanup_heights<<<gridBlock,threadBlock>>>(Params, x, st, id, st1, id1, counter);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
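Note that the timed loop in this benchmark never synchronizes before reading steady_clock, and kernel launches are asynchronous, so the reported times largely reflect launch overhead rather than kernel runtime. A small alternative sketch using CUDA events is shown below; dummy_kernel and the iteration count are placeholders, not part of the benchmark above.

#include <cuda_runtime.h>
#include <cstdio>

__global__ void dummy_kernel() {}

int main() {
  const int kIters = 1000;
  dim3 grid(1), block(256);

  dummy_kernel<<<grid, block>>>();  // warm-up
  cudaDeviceSynchronize();

  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start);
  for (int i = 0; i < kIters; ++i) {
    dummy_kernel<<<grid, block>>>();
  }
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);  // waits until all kIters launches have finished

  float ms = 0.f;
  cudaEventElapsedTime(&ms, start, stop);
  std::printf("avg per launch: %f us\n", ms * 1000.f / kIters);

  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  return 0;
}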
ae1205dc36dce706491519f64a3f884f60dd3a64.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "matrix_wrappers.h" namespace matrix{ //C = (alpha * A) x (beta * B) //A - M x N //B - N x K //C - M x K __host__ void mult(float* A, float* B, float* C, int M, int N, int K){ mult(A, B, C, 1., 1., false, false, M, N, K); } __host__ void mult(float* A, float* B, float* C, float alpha, float beta, bool TA, bool TB, int M, int N, int K){ //Initializing CuBlas hipblasHandle_t cublasH = NULL; hipblasStatus_t cublas_status = HIPBLAS_STATUS_SUCCESS; hipblasOperation_t OA = (TA ? HIPBLAS_OP_T : HIPBLAS_OP_N); hipblasOperation_t OB = (TB ? HIPBLAS_OP_T : HIPBLAS_OP_N); cublas_status = hipblasCreate(&cublasH); assert(HIPBLAS_STATUS_SUCCESS == cublas_status); //Zeroing results hipMemset(C, 0, M * K * sizeof(float)); //Multiplying cublas_status = hipblasSgemm( cublasH, OA, OB, M, K, N, &alpha, A, M, B, N, &beta, C, M ); assert(HIPBLAS_STATUS_SUCCESS == cublas_status); gpuErrchk(hipDeviceSynchronize()); if (cublasH) hipblasDestroy(cublasH); } //Calculates diagonal matrix multiplication //C = A x B //A - 1 x N diagonal (representing MxN) //B - N x K __host__ void multD(float* A, float* B, float* C, int M, int N){ //Initializing CuBlas hipblasHandle_t cublasH = NULL; hipblasStatus_t cublas_status = HIPBLAS_STATUS_SUCCESS; cublas_status = hipblasCreate(&cublasH); assert(HIPBLAS_STATUS_SUCCESS == cublas_status); //Zeroing results gpuErrchk(hipMemset(C, 0, M * N * sizeof(float))); //Multiplying cublas_status = hipblasSdgmm( cublasH, HIPBLAS_SIDE_LEFT, M, N, B, M, A, 1, C, M ); assert(HIPBLAS_STATUS_SUCCESS == cublas_status); gpuErrchk(hipDeviceSynchronize()); if (cublasH ) hipblasDestroy(cublasH); } //Nvidia Reference implementation __global__ void transpose_(float *A, float *B, int M, int N){ __shared__ float block[BLOCK_DIM][BLOCK_DIM+1]; // read the matrix tile into shared memory unsigned int xIndex = blockIdx.x * BLOCK_DIM + threadIdx.x; unsigned int yIndex = blockIdx.y * BLOCK_DIM + threadIdx.y; if((xIndex < M) && (yIndex < N)) { unsigned int index_in = yIndex * M + xIndex; block[threadIdx.y][threadIdx.x] = A[index_in]; } __syncthreads(); // write the transposed matrix tile to global memory xIndex = blockIdx.y * BLOCK_DIM + threadIdx.x; yIndex = blockIdx.x * BLOCK_DIM + threadIdx.y; if((xIndex < N) && (yIndex < M)) { unsigned int index_out = yIndex * N + xIndex; B[index_out] = block[threadIdx.x][threadIdx.y]; } } __host__ void transpose(float* A, float* B, int M, int N){ float* C; //Copy matrix if storing back if (A == B){ gpuErrchk(hipMalloc(&C, M * N * sizeof(float))); vector::copy(C, A, M * N); } else C = A; dim3 grid((M - 1 + BLOCK_DIM) / BLOCK_DIM, (N - 1 + BLOCK_DIM) / BLOCK_DIM, 1); dim3 threads(BLOCK_DIM, BLOCK_DIM, 1); hipLaunchKernelGGL(( transpose_), dim3(grid), dim3(threads), 0, 0, C, B, M, N); gpuErrchk(hipDeviceSynchronize()); if (A == B && C) hipFree(C); } __host__ void inverse (float* A, float* B, int N){ float* C; if (A == B) hipMalloc(&C, N * N * sizeof(float)); else C = B; //Initializing CuBlas hipsolverDnHandle_t cusolverH = NULL; cusolverStatus_t cusolver_status = CUSOLVER_STATUS_SUCCESS; cusolver_status = hipsolverDnCreate(&cusolverH); assert(CUSOLVER_STATUS_SUCCESS == cusolver_status); int lwork; cusolver_status = hipsolverDnSgetrf_bufferSize( cusolverH, N, N, A, N, &lwork ); gpuErrchk(hipDeviceSynchronize()); assert(CUSOLVER_STATUS_SUCCESS == cusolver_status); float* work; gpuErrchk(hipMalloc(&work, lwork * sizeof(float))); int* P; gpuErrchk(hipMalloc(&P, N * sizeof(int))); int* 
devInfo; gpuErrchk(hipMalloc(&devInfo, sizeof(int))); cusolver_status = hipsolverDnSgetrf( cusolverH, N, N, A, N, work, P, devInfo ); gpuErrchk(hipDeviceSynchronize()); assert(CUSOLVER_STATUS_SUCCESS == cusolver_status); float* identity = (float*)malloc(N * N * sizeof(float)); for (int i = 0; i < N * N; i++) identity[i] = 0; for (int i = 0; i < N; i++) identity[i * (N+1)] = 1; gpuErrchk(hipMemcpy(C, identity, N * N * sizeof(float), hipMemcpyHostToDevice)); hipsolverDnSgetrs(cusolverH, HIPBLAS_OP_N, N, N, A, N, P, C, N, devInfo ); gpuErrchk(hipDeviceSynchronize()); assert(CUSOLVER_STATUS_SUCCESS == cusolver_status); if (cusolverH) hipsolverDnDestroy(cusolverH); if (identity) free(identity); if (devInfo) hipFree(devInfo); if (work) hipFree(work); if (P) hipFree(P); if (A == B){ vector::copy(A, C, N * N); hipFree(C); } } }
ae1205dc36dce706491519f64a3f884f60dd3a64.cu
#include "matrix_wrappers.h" namespace matrix{ //C = (alpha * A) x (beta * B) //A - M x N //B - N x K //C - M x K __host__ void mult(float* A, float* B, float* C, int M, int N, int K){ mult(A, B, C, 1., 1., false, false, M, N, K); } __host__ void mult(float* A, float* B, float* C, float alpha, float beta, bool TA, bool TB, int M, int N, int K){ //Initializing CuBlas cublasHandle_t cublasH = NULL; cublasStatus_t cublas_status = CUBLAS_STATUS_SUCCESS; cublasOperation_t OA = (TA ? CUBLAS_OP_T : CUBLAS_OP_N); cublasOperation_t OB = (TB ? CUBLAS_OP_T : CUBLAS_OP_N); cublas_status = cublasCreate(&cublasH); assert(CUBLAS_STATUS_SUCCESS == cublas_status); //Zeroing results cudaMemset(C, 0, M * K * sizeof(float)); //Multiplying cublas_status = cublasSgemm_v2( cublasH, OA, OB, M, K, N, &alpha, A, M, B, N, &beta, C, M ); assert(CUBLAS_STATUS_SUCCESS == cublas_status); gpuErrchk(cudaDeviceSynchronize()); if (cublasH) cublasDestroy(cublasH); } //Calculates diagonal matrix multiplication //C = A x B //A - 1 x N diagonal (representing MxN) //B - N x K __host__ void multD(float* A, float* B, float* C, int M, int N){ //Initializing CuBlas cublasHandle_t cublasH = NULL; cublasStatus_t cublas_status = CUBLAS_STATUS_SUCCESS; cublas_status = cublasCreate(&cublasH); assert(CUBLAS_STATUS_SUCCESS == cublas_status); //Zeroing results gpuErrchk(cudaMemset(C, 0, M * N * sizeof(float))); //Multiplying cublas_status = cublasSdgmm( cublasH, CUBLAS_SIDE_LEFT, M, N, B, M, A, 1, C, M ); assert(CUBLAS_STATUS_SUCCESS == cublas_status); gpuErrchk(cudaDeviceSynchronize()); if (cublasH ) cublasDestroy(cublasH); } //Nvidia Reference implementation __global__ void transpose_(float *A, float *B, int M, int N){ __shared__ float block[BLOCK_DIM][BLOCK_DIM+1]; // read the matrix tile into shared memory unsigned int xIndex = blockIdx.x * BLOCK_DIM + threadIdx.x; unsigned int yIndex = blockIdx.y * BLOCK_DIM + threadIdx.y; if((xIndex < M) && (yIndex < N)) { unsigned int index_in = yIndex * M + xIndex; block[threadIdx.y][threadIdx.x] = A[index_in]; } __syncthreads(); // write the transposed matrix tile to global memory xIndex = blockIdx.y * BLOCK_DIM + threadIdx.x; yIndex = blockIdx.x * BLOCK_DIM + threadIdx.y; if((xIndex < N) && (yIndex < M)) { unsigned int index_out = yIndex * N + xIndex; B[index_out] = block[threadIdx.x][threadIdx.y]; } } __host__ void transpose(float* A, float* B, int M, int N){ float* C; //Copy matrix if storing back if (A == B){ gpuErrchk(cudaMalloc(&C, M * N * sizeof(float))); vector::copy(C, A, M * N); } else C = A; dim3 grid((M - 1 + BLOCK_DIM) / BLOCK_DIM, (N - 1 + BLOCK_DIM) / BLOCK_DIM, 1); dim3 threads(BLOCK_DIM, BLOCK_DIM, 1); transpose_<<<grid, threads>>>(C, B, M, N); gpuErrchk(cudaDeviceSynchronize()); if (A == B && C) cudaFree(C); } __host__ void inverse (float* A, float* B, int N){ float* C; if (A == B) cudaMalloc(&C, N * N * sizeof(float)); else C = B; //Initializing CuBlas cusolverDnHandle_t cusolverH = NULL; cusolverStatus_t cusolver_status = CUSOLVER_STATUS_SUCCESS; cusolver_status = cusolverDnCreate(&cusolverH); assert(CUSOLVER_STATUS_SUCCESS == cusolver_status); int lwork; cusolver_status = cusolverDnSgetrf_bufferSize( cusolverH, N, N, A, N, &lwork ); gpuErrchk(cudaDeviceSynchronize()); assert(CUSOLVER_STATUS_SUCCESS == cusolver_status); float* work; gpuErrchk(cudaMalloc(&work, lwork * sizeof(float))); int* P; gpuErrchk(cudaMalloc(&P, N * sizeof(int))); int* devInfo; gpuErrchk(cudaMalloc(&devInfo, sizeof(int))); cusolver_status = cusolverDnSgetrf( cusolverH, N, N, A, N, work, P, devInfo ); 
gpuErrchk(cudaDeviceSynchronize()); assert(CUSOLVER_STATUS_SUCCESS == cusolver_status); float* identity = (float*)malloc(N * N * sizeof(float)); for (int i = 0; i < N * N; i++) identity[i] = 0; for (int i = 0; i < N; i++) identity[i * (N+1)] = 1; gpuErrchk(cudaMemcpy(C, identity, N * N * sizeof(float), cudaMemcpyHostToDevice)); cusolverDnSgetrs(cusolverH, CUBLAS_OP_N, N, N, A, N, P, C, N, devInfo ); gpuErrchk(cudaDeviceSynchronize()); assert(CUSOLVER_STATUS_SUCCESS == cusolver_status); if (cusolverH) cusolverDnDestroy(cusolverH); if (identity) free(identity); if (devInfo) cudaFree(devInfo); if (work) cudaFree(work); if (P) cudaFree(P); if (A == B){ vector::copy(A, C, N * N); cudaFree(C); } } }
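matrix::inverse above LU-factors A in place with getrf and then solves A·X = I with getrs. A compact standalone sketch of that same cuSOLVER pattern follows (column-major storage, minimal error checking, a hypothetical 3x3 input); it is not the wrapper itself, just the underlying call sequence.

#include <cuda_runtime.h>
#include <cusolverDn.h>
#include <cstdio>
#include <vector>

int main() {
  const int N = 3;
  // Column-major; the matrix is symmetric, so storage order does not matter here.
  std::vector<float> hA = {4, 2, 0,  2, 3, 1,  0, 1, 2};
  std::vector<float> hX(N * N, 0.f);
  for (int i = 0; i < N; ++i) hX[i * (N + 1)] = 1.f;  // identity right-hand side

  float *dA, *dX, *dWork;
  int *dPiv, *dInfo, lwork = 0;
  cudaMalloc(&dA, N * N * sizeof(float));
  cudaMalloc(&dX, N * N * sizeof(float));
  cudaMalloc(&dPiv, N * sizeof(int));
  cudaMalloc(&dInfo, sizeof(int));
  cudaMemcpy(dA, hA.data(), N * N * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(dX, hX.data(), N * N * sizeof(float), cudaMemcpyHostToDevice);

  cusolverDnHandle_t handle;
  cusolverDnCreate(&handle);
  cusolverDnSgetrf_bufferSize(handle, N, N, dA, N, &lwork);
  cudaMalloc(&dWork, lwork * sizeof(float));

  // LU-factor A in place, then solve A * X = I so that X = inv(A).
  cusolverDnSgetrf(handle, N, N, dA, N, dWork, dPiv, dInfo);
  cusolverDnSgetrs(handle, CUBLAS_OP_N, N, N, dA, N, dPiv, dX, N, dInfo);
  cudaDeviceSynchronize();

  cudaMemcpy(hX.data(), dX, N * N * sizeof(float), cudaMemcpyDeviceToHost);
  std::printf("inv(A) column 0: %f %f %f\n", hX[0], hX[1], hX[2]);

  cusolverDnDestroy(handle);
  cudaFree(dA); cudaFree(dX); cudaFree(dWork); cudaFree(dPiv); cudaFree(dInfo);
  return 0;
}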
fill.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/**********************************
When updating a kernel or adding a new one,
please compile the ptx file and commit it:
nvcc -ptx -arch=sm_30 SystemML.cu
***********************************/

// dim => rlen (Assumption: rlen == clen)
// N = length of dense array
// Fills the dense array A (length lenA) with `scalar`, one thread per element.
extern "C" __global__ void fill(double* A, double scalar, int lenA) {
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index < lenA) {
    A[index] = scalar;
  }
}
fill.cu
#include "includes.h" /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /********************************** When updating a kernel or adding a new one, please compile the ptx file and commit it: nvcc -ptx -arch=sm_30 SystemML.cu ***********************************/ // dim => rlen (Assumption: rlen == clen) // N = length of dense array extern "C" extern "C" __global__ void fill(double* A, double scalar, int lenA) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < lenA){ A[index] = scalar; } }
2e10fd09c8d8c69fd04dee1619975818c3ae867c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/operators/math/pooling.h" #include "paddle/platform/cuda_helper.h" namespace paddle { namespace operators { namespace math { template <typename PoolProcess, typename T> __global__ void KernelPool2D(const int nthreads, const T* input_data, T* output_data, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, PoolProcess pool_process) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int c = (index / output_width / output_height) % channels; int batch_idx = index / output_width / output_height / channels; int hstart = ph * stride_height - padding_height; int hend = min(hstart + ksize_height, input_height); hstart = max(hstart, 0); int wstart = pw * stride_width - padding_width; int wend = min(wstart + ksize_width, input_width); wstart = max(wstart, 0); input_data += (batch_idx * channels + c) * input_height * input_width; T ele = pool_process.initial(); for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { pool_process.compute(ele, input_data[h * input_width + w]); } } int pool_size = (hend - hstart) * (wend - wstart); pool_process.finalize(ele, (static_cast<T>(pool_size))); output_data[index] = ele; } } template <typename PoolProcess, typename T> __global__ void KernelPool2DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, T* input_grad, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, PoolProcess pool_process) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int offsetW = index % input_width + padding_width; int offsetH = (index / input_width) % input_height + padding_height; int offsetC = (index / input_width / input_height) % channels; int batch_idx = index / input_width / input_height / channels; int phstart = (offsetH < ksize_height) ? 0 : (offsetH - ksize_height) / stride_height + 1; int pwstart = (offsetW < ksize_width) ? 
0 : (offsetW - ksize_width) / stride_width + 1; int phend = min(offsetH / stride_height + 1, output_height); int pwend = min(offsetW / stride_width + 1, output_width); T gradient = 0; T input = input_data[index]; int output_idx = (batch_idx * channels + offsetC) * output_height * output_width; output_data += output_idx; output_grad += output_idx; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); hstart = max(hstart, 0); wstart = max(wstart, 0); int pool_size = (hend - hstart) * (wend - wstart); int output_sub_idx = ph * output_width + pw; pool_process.compute(input, output_data[output_sub_idx], output_grad[output_sub_idx], gradient, static_cast<T>(1.0 / pool_size)); } } input_grad[index] = gradient; } } template <typename T> __global__ void KernelMaxPool2DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, T* input_grad, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int c = (index / output_width / output_height) % channels; int batch_idx = index / output_width / output_height / channels; int hstart = ph * stride_height - padding_height; int hend = min(hstart + ksize_height, input_height); hstart = max(hstart, 0); int wstart = pw * stride_width - padding_width; int wend = min(wstart + ksize_width, input_width); wstart = max(wstart, 0); input_data += (batch_idx * channels + c) * input_height * input_width; input_grad += (batch_idx * channels + c) * input_height * input_width; T ele = output_data[index]; int maxIndex = -1; bool stop = false; for (int h = hstart; h < hend && !stop; ++h) { for (int w = wstart; w < wend && !stop; ++w) { if (ele == input_data[h * input_width + w]) { maxIndex = h * input_width + w; stop = true; } } } if (maxIndex != -1) { // atomic add atomicAdd(input_grad + maxIndex, output_grad[index]); } } } template <typename PoolProcess, typename T> class Pool2dFunctor<platform::GPUPlace, PoolProcess, T> { public: void operator()(const platform::DeviceContext& context, const framework::Tensor& input, framework::Tensor& output, std::vector<int>& ksize, std::vector<int>& strides, std::vector<int>& paddings, PoolProcess pool_process) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output.dims()[1]; const int output_height = output.dims()[2]; const int output_width = output.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); T* output_data = output.mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); 
hipLaunchKernelGGL(( KernelPool2D< PoolProcess, T>), dim3(grid), dim3(threads), 0, reinterpret_cast<const platform::CUDADeviceContext&>(context) .stream(), nthreads, input_data, output_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, pool_process); } }; template <typename PoolProcess, typename T> class Pool2dGradFunctor<platform::GPUPlace, PoolProcess, T> { public: void operator()(const platform::DeviceContext& context, const framework::Tensor& input, framework::Tensor& input_grad, const framework::Tensor& output, const framework::Tensor& output_grad, std::vector<int>& ksize, std::vector<int>& strides, std::vector<int>& paddings, PoolProcess pool_process) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_height = output.dims()[2]; const int output_width = output.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad.mutable_data<T>(context.GetPlace()); int nthreads = batch_size * input_channels * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelPool2DGrad< PoolProcess, T>), dim3(grid), dim3(threads), 0, reinterpret_cast<const platform::CUDADeviceContext&>(context) .stream(), nthreads, input_data, output_data, output_grad_data, input_grad_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, pool_process); } }; template <typename T> class MaxPool2dGradFunctor<platform::GPUPlace, T> { public: void operator()(const platform::DeviceContext& context, const framework::Tensor& input, framework::Tensor& input_grad, const framework::Tensor& output, const framework::Tensor& output_grad, std::vector<int>& ksize, std::vector<int>& strides, std::vector<int>& paddings) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output.dims()[1]; const int output_height = output.dims()[2]; const int output_width = output.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad.mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelMaxPool2DGrad< T>), dim3(grid), dim3(threads), 0, reinterpret_cast<const platform::CUDADeviceContext&>(context) .stream(), nthreads, input_data, output_data, output_grad_data, input_grad_data, input_channels, input_height, input_width, output_height, 
output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width); } }; template class MaxPool2dGradFunctor<platform::GPUPlace, float>; // template class MaxPool2dGradFunctor<platform::GPUPlace, double>; // The // 64-bit floating-point version of atomicAdd() is only supported by devices of // compute capability 6.x and higher. template class Pool2dFunctor<platform::GPUPlace, paddle::operators::math::MaxPool<float>, float>; template class Pool2dFunctor<platform::GPUPlace, paddle::operators::math::AvgPool<float>, float>; template class Pool2dGradFunctor< platform::GPUPlace, paddle::operators::math::MaxPoolGrad<float>, float>; template class Pool2dGradFunctor< platform::GPUPlace, paddle::operators::math::AvgPoolGrad<float>, float>; template class Pool2dFunctor<platform::GPUPlace, paddle::operators::math::MaxPool<double>, double>; template class Pool2dFunctor<platform::GPUPlace, paddle::operators::math::AvgPool<double>, double>; template class Pool2dGradFunctor< platform::GPUPlace, paddle::operators::math::MaxPoolGrad<double>, double>; template class Pool2dGradFunctor< platform::GPUPlace, paddle::operators::math::AvgPoolGrad<double>, double>; template <typename PoolProcess, typename T> __global__ void KernelPool3D( const int nthreads, const T* input_data, T* output_data, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, PoolProcess pool_process) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int pd = (index / output_width / output_height) % output_depth; int c = (index / output_width / output_height / output_depth) % channels; int batch_idx = index / output_width / output_height / output_depth / channels; int dstart = pd * stride_depth - padding_depth; int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int dend = min(dstart + ksize_depth, input_depth); int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); T ele = pool_process.initial(); input_data += (batch_idx * channels + c) * input_depth * input_height * input_width; for (int d = dstart; d < dend; ++d) { for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { pool_process.compute( ele, input_data[(d * input_height + h) * input_width + w]); } } } int pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart); pool_process.finalize(ele, static_cast<T>(pool_size)); output_data[index] = ele; } } template <typename PoolProcess, typename T> __global__ void KernelPool3DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, T* input_grad, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, 
PoolProcess pool_process) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int offsetW = index % input_width + padding_width; int offsetH = (index / input_width) % input_height + padding_height; int offsetD = (index / input_width / input_height) % input_depth + padding_depth; int offsetC = (index / input_width / input_height / input_depth) % channels; int batch_idx = index / input_width / input_height / input_depth / channels; int pdstart = (offsetD < ksize_depth) ? 0 : (offsetD - ksize_depth) / stride_depth + 1; int phstart = (offsetH < ksize_height) ? 0 : (offsetH - ksize_height) / stride_height + 1; int pwstart = (offsetW < ksize_width) ? 0 : (offsetW - ksize_width) / stride_width + 1; int pdend = min((offsetD) / stride_depth + 1, output_depth); int phend = min((offsetH) / stride_height + 1, output_height); int pwend = min((offsetW) / stride_width + 1, output_width); T gradient = 0; T input = input_data[index]; int output_idx = (batch_idx * channels + offsetC) * output_depth * output_height * output_width; output_data += output_idx; output_grad += output_idx; for (int pd = pdstart; pd < pdend; ++pd) { for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int dstart = pd * stride_depth - padding_depth; int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int dend = min(dstart + ksize_depth, input_depth); int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); int pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart); int output_sub_idx = (pd * output_height + ph) * output_width + pw; pool_process.compute(input, output_data[output_sub_idx], output_grad[output_sub_idx], gradient, static_cast<T>(1.0 / pool_size)); } } } input_grad[index] = gradient; } } template <typename T> __global__ void KernelMaxPool3DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, T* input_grad, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int pd = (index / output_width / output_height) % output_depth; int c = (index / output_width / output_height / output_depth) % channels; int batch_idx = index / output_width / output_height / output_depth / channels; int dstart = pd * stride_depth - padding_depth; int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int dend = min(dstart + ksize_depth, input_depth); int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); T ele = output_data[index]; bool stop = false; int maxIdx = -1; input_data += (batch_idx * channels + c) * input_depth * input_height * input_width; input_grad += (batch_idx * channels + c) * input_depth * input_height * input_width; for (int d 
= dstart; d < dend && !stop; ++d) { for (int h = hstart; h < hend && !stop; ++h) { for (int w = wstart; w < wend && !stop; ++w) { if (ele == input_data[(d * input_height + h) * input_width + w]) { stop = true; maxIdx = (d * input_height + h) * input_width + w; } } } } if (maxIdx != -1) { // atomic add atomicAdd(input_grad + maxIdx, output_grad[index]); } } } template <typename PoolProcess, class T> class Pool3dFunctor<platform::GPUPlace, PoolProcess, T> { public: void operator()(const platform::DeviceContext& context, const framework::Tensor& input, framework::Tensor& output, std::vector<int>& ksize, std::vector<int>& strides, std::vector<int>& paddings, PoolProcess pool_process) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output.dims()[1]; const int output_depth = output.dims()[2]; const int output_height = output.dims()[3]; const int output_width = output.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); T* output_data = output.mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelPool3D< PoolProcess, T>), dim3(grid), dim3(threads), 0, reinterpret_cast<const platform::CUDADeviceContext&>(context) .stream(), nthreads, input_data, output_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, pool_process); } }; template <typename PoolProcess, class T> class Pool3dGradFunctor<platform::GPUPlace, PoolProcess, T> { public: void operator()(const platform::DeviceContext& context, const framework::Tensor& input, framework::Tensor& input_grad, const framework::Tensor& output, const framework::Tensor& output_grad, std::vector<int>& ksize, std::vector<int>& strides, std::vector<int>& paddings, PoolProcess pool_process) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output.dims()[1]; const int output_depth = output.dims()[2]; const int output_height = output.dims()[3]; const int output_width = output.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad.mutable_data<T>(context.GetPlace()); int nthreads = batch_size * input_channels * input_depth * input_height * input_width; int blocks = 
(nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelPool3DGrad< PoolProcess, T>), dim3(grid), dim3(threads), 0, reinterpret_cast<const platform::CUDADeviceContext&>(context) .stream(), nthreads, input_data, output_data, output_grad_data, input_grad_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, pool_process); } }; template <class T> class MaxPool3dGradFunctor<platform::GPUPlace, T> { public: void operator()(const platform::DeviceContext& context, const framework::Tensor& input, framework::Tensor& input_grad, const framework::Tensor& output, const framework::Tensor& output_grad, std::vector<int>& ksize, std::vector<int>& strides, std::vector<int>& paddings) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output.dims()[1]; const int output_depth = output.dims()[2]; const int output_height = output.dims()[3]; const int output_width = output.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad.mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelMaxPool3DGrad< T>), dim3(grid), dim3(threads), 0, reinterpret_cast<const platform::CUDADeviceContext&>(context) .stream(), nthreads, input_data, output_data, output_grad_data, input_grad_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width); } }; template class MaxPool3dGradFunctor<platform::GPUPlace, float>; // template class MaxPool3dGradFunctor<platform::GPUPlace, double>; // The // 64-bit floating-point version of atomicAdd() is only supported by devices of // compute capability 6.x and higher. 
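// The commented-out double instantiation above needs atomicAdd(double*, double),
// which is only available natively on compute capability 6.x+ devices. A minimal
// sketch of the usual compare-and-swap software fallback is shown below for
// reference; the name atomicAddDouble is illustrative only and nothing in this
// file calls it.
static __device__ double atomicAddDouble(double* address, double val) {
  unsigned long long int* address_as_ull =
      reinterpret_cast<unsigned long long int*>(address);
  unsigned long long int old = *address_as_ull, assumed;
  do {
    assumed = old;
    // Reinterpret the bits, add, and retry until no other thread has
    // modified the value in between.
    old = atomicCAS(address_as_ull, assumed,
                    __double_as_longlong(val + __longlong_as_double(assumed)));
  } while (assumed != old);
  return __longlong_as_double(old);
}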
template class Pool3dFunctor<platform::GPUPlace, paddle::operators::math::MaxPool<float>, float>; template class Pool3dFunctor<platform::GPUPlace, paddle::operators::math::AvgPool<float>, float>; template class Pool3dGradFunctor< platform::GPUPlace, paddle::operators::math::MaxPoolGrad<float>, float>; template class Pool3dGradFunctor< platform::GPUPlace, paddle::operators::math::AvgPoolGrad<float>, float>; template class Pool3dFunctor<platform::GPUPlace, paddle::operators::math::MaxPool<double>, double>; template class Pool3dFunctor<platform::GPUPlace, paddle::operators::math::AvgPool<double>, double>; template class Pool3dGradFunctor< platform::GPUPlace, paddle::operators::math::MaxPoolGrad<double>, double>; template class Pool3dGradFunctor< platform::GPUPlace, paddle::operators::math::AvgPoolGrad<double>, double>; } // namespace math } // namespace operators } // namespace paddle
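// Note on the launch syntax: the original CUDA source that follows launches
// kernels with the triple-chevron form, e.g.
//   KernelPool2D<PoolProcess, T><<<grid, threads, 0, stream>>>(args...);
// hipify rewrites such launches into the portable form used throughout this
// file:
//   hipLaunchKernelGGL((KernelPool2D<PoolProcess, T>), dim3(grid),
//                      dim3(threads), 0, stream, args...);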
2e10fd09c8d8c69fd04dee1619975818c3ae867c.cu
/* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/operators/math/pooling.h" #include "paddle/platform/cuda_helper.h" namespace paddle { namespace operators { namespace math { template <typename PoolProcess, typename T> __global__ void KernelPool2D(const int nthreads, const T* input_data, T* output_data, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, PoolProcess pool_process) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int c = (index / output_width / output_height) % channels; int batch_idx = index / output_width / output_height / channels; int hstart = ph * stride_height - padding_height; int hend = min(hstart + ksize_height, input_height); hstart = max(hstart, 0); int wstart = pw * stride_width - padding_width; int wend = min(wstart + ksize_width, input_width); wstart = max(wstart, 0); input_data += (batch_idx * channels + c) * input_height * input_width; T ele = pool_process.initial(); for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { pool_process.compute(ele, input_data[h * input_width + w]); } } int pool_size = (hend - hstart) * (wend - wstart); pool_process.finalize(ele, (static_cast<T>(pool_size))); output_data[index] = ele; } } template <typename PoolProcess, typename T> __global__ void KernelPool2DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, T* input_grad, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, PoolProcess pool_process) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int offsetW = index % input_width + padding_width; int offsetH = (index / input_width) % input_height + padding_height; int offsetC = (index / input_width / input_height) % channels; int batch_idx = index / input_width / input_height / channels; int phstart = (offsetH < ksize_height) ? 0 : (offsetH - ksize_height) / stride_height + 1; int pwstart = (offsetW < ksize_width) ? 
0 : (offsetW - ksize_width) / stride_width + 1; int phend = min(offsetH / stride_height + 1, output_height); int pwend = min(offsetW / stride_width + 1, output_width); T gradient = 0; T input = input_data[index]; int output_idx = (batch_idx * channels + offsetC) * output_height * output_width; output_data += output_idx; output_grad += output_idx; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); hstart = max(hstart, 0); wstart = max(wstart, 0); int pool_size = (hend - hstart) * (wend - wstart); int output_sub_idx = ph * output_width + pw; pool_process.compute(input, output_data[output_sub_idx], output_grad[output_sub_idx], gradient, static_cast<T>(1.0 / pool_size)); } } input_grad[index] = gradient; } } template <typename T> __global__ void KernelMaxPool2DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, T* input_grad, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int c = (index / output_width / output_height) % channels; int batch_idx = index / output_width / output_height / channels; int hstart = ph * stride_height - padding_height; int hend = min(hstart + ksize_height, input_height); hstart = max(hstart, 0); int wstart = pw * stride_width - padding_width; int wend = min(wstart + ksize_width, input_width); wstart = max(wstart, 0); input_data += (batch_idx * channels + c) * input_height * input_width; input_grad += (batch_idx * channels + c) * input_height * input_width; T ele = output_data[index]; int maxIndex = -1; bool stop = false; for (int h = hstart; h < hend && !stop; ++h) { for (int w = wstart; w < wend && !stop; ++w) { if (ele == input_data[h * input_width + w]) { maxIndex = h * input_width + w; stop = true; } } } if (maxIndex != -1) { // atomic add atomicAdd(input_grad + maxIndex, output_grad[index]); } } } template <typename PoolProcess, typename T> class Pool2dFunctor<platform::GPUPlace, PoolProcess, T> { public: void operator()(const platform::DeviceContext& context, const framework::Tensor& input, framework::Tensor& output, std::vector<int>& ksize, std::vector<int>& strides, std::vector<int>& paddings, PoolProcess pool_process) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output.dims()[1]; const int output_height = output.dims()[2]; const int output_width = output.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); T* output_data = output.mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); 
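    // Launch configuration: nthreads assigns one thread per output element,
    // with 1024 threads per block and ceil(nthreads / 1024) blocks; the kernel
    // body uses a grid-stride loop, so any surplus threads in the last block
    // simply fail the index check and do no work.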
KernelPool2D< PoolProcess, T><<<grid, threads, 0, reinterpret_cast<const platform::CUDADeviceContext&>(context) .stream()>>>(nthreads, input_data, output_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, pool_process); } }; template <typename PoolProcess, typename T> class Pool2dGradFunctor<platform::GPUPlace, PoolProcess, T> { public: void operator()(const platform::DeviceContext& context, const framework::Tensor& input, framework::Tensor& input_grad, const framework::Tensor& output, const framework::Tensor& output_grad, std::vector<int>& ksize, std::vector<int>& strides, std::vector<int>& paddings, PoolProcess pool_process) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_height = output.dims()[2]; const int output_width = output.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad.mutable_data<T>(context.GetPlace()); int nthreads = batch_size * input_channels * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelPool2DGrad< PoolProcess, T><<<grid, threads, 0, reinterpret_cast<const platform::CUDADeviceContext&>(context) .stream()>>>( nthreads, input_data, output_data, output_grad_data, input_grad_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, pool_process); } }; template <typename T> class MaxPool2dGradFunctor<platform::GPUPlace, T> { public: void operator()(const platform::DeviceContext& context, const framework::Tensor& input, framework::Tensor& input_grad, const framework::Tensor& output, const framework::Tensor& output_grad, std::vector<int>& ksize, std::vector<int>& strides, std::vector<int>& paddings) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output.dims()[1]; const int output_height = output.dims()[2]; const int output_width = output.dims()[3]; const int ksize_height = ksize[0]; const int ksize_width = ksize[1]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad.mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelMaxPool2DGrad< T><<<grid, threads, 0, reinterpret_cast<const platform::CUDADeviceContext&>(context) .stream()>>>( nthreads, input_data, output_data, output_grad_data, input_grad_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, 
padding_width); } }; template class MaxPool2dGradFunctor<platform::GPUPlace, float>; // template class MaxPool2dGradFunctor<platform::GPUPlace, double>; // The // 64-bit floating-point version of atomicAdd() is only supported by devices of // compute capability 6.x and higher. template class Pool2dFunctor<platform::GPUPlace, paddle::operators::math::MaxPool<float>, float>; template class Pool2dFunctor<platform::GPUPlace, paddle::operators::math::AvgPool<float>, float>; template class Pool2dGradFunctor< platform::GPUPlace, paddle::operators::math::MaxPoolGrad<float>, float>; template class Pool2dGradFunctor< platform::GPUPlace, paddle::operators::math::AvgPoolGrad<float>, float>; template class Pool2dFunctor<platform::GPUPlace, paddle::operators::math::MaxPool<double>, double>; template class Pool2dFunctor<platform::GPUPlace, paddle::operators::math::AvgPool<double>, double>; template class Pool2dGradFunctor< platform::GPUPlace, paddle::operators::math::MaxPoolGrad<double>, double>; template class Pool2dGradFunctor< platform::GPUPlace, paddle::operators::math::AvgPoolGrad<double>, double>; template <typename PoolProcess, typename T> __global__ void KernelPool3D( const int nthreads, const T* input_data, T* output_data, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, PoolProcess pool_process) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int pd = (index / output_width / output_height) % output_depth; int c = (index / output_width / output_height / output_depth) % channels; int batch_idx = index / output_width / output_height / output_depth / channels; int dstart = pd * stride_depth - padding_depth; int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int dend = min(dstart + ksize_depth, input_depth); int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); T ele = pool_process.initial(); input_data += (batch_idx * channels + c) * input_depth * input_height * input_width; for (int d = dstart; d < dend; ++d) { for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { pool_process.compute( ele, input_data[(d * input_height + h) * input_width + w]); } } } int pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart); pool_process.finalize(ele, static_cast<T>(pool_size)); output_data[index] = ele; } } template <typename PoolProcess, typename T> __global__ void KernelPool3DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, T* input_grad, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, PoolProcess pool_process) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < 
nthreads; index += blockDim.x * gridDim.x) { int offsetW = index % input_width + padding_width; int offsetH = (index / input_width) % input_height + padding_height; int offsetD = (index / input_width / input_height) % input_depth + padding_depth; int offsetC = (index / input_width / input_height / input_depth) % channels; int batch_idx = index / input_width / input_height / input_depth / channels; int pdstart = (offsetD < ksize_depth) ? 0 : (offsetD - ksize_depth) / stride_depth + 1; int phstart = (offsetH < ksize_height) ? 0 : (offsetH - ksize_height) / stride_height + 1; int pwstart = (offsetW < ksize_width) ? 0 : (offsetW - ksize_width) / stride_width + 1; int pdend = min((offsetD) / stride_depth + 1, output_depth); int phend = min((offsetH) / stride_height + 1, output_height); int pwend = min((offsetW) / stride_width + 1, output_width); T gradient = 0; T input = input_data[index]; int output_idx = (batch_idx * channels + offsetC) * output_depth * output_height * output_width; output_data += output_idx; output_grad += output_idx; for (int pd = pdstart; pd < pdend; ++pd) { for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int dstart = pd * stride_depth - padding_depth; int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int dend = min(dstart + ksize_depth, input_depth); int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); int pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart); int output_sub_idx = (pd * output_height + ph) * output_width + pw; pool_process.compute(input, output_data[output_sub_idx], output_grad[output_sub_idx], gradient, static_cast<T>(1.0 / pool_size)); } } } input_grad[index] = gradient; } } template <typename T> __global__ void KernelMaxPool3DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, T* input_grad, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int pd = (index / output_width / output_height) % output_depth; int c = (index / output_width / output_height / output_depth) % channels; int batch_idx = index / output_width / output_height / output_depth / channels; int dstart = pd * stride_depth - padding_depth; int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; int dend = min(dstart + ksize_depth, input_depth); int hend = min(hstart + ksize_height, input_height); int wend = min(wstart + ksize_width, input_width); dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); T ele = output_data[index]; bool stop = false; int maxIdx = -1; input_data += (batch_idx * channels + c) * input_depth * input_height * input_width; input_grad += (batch_idx * channels + c) * input_depth * input_height * input_width; for (int d = dstart; d < dend && !stop; ++d) { for (int h = hstart; h < hend && !stop; ++h) { for (int 
w = wstart; w < wend && !stop; ++w) { if (ele == input_data[(d * input_height + h) * input_width + w]) { stop = true; maxIdx = (d * input_height + h) * input_width + w; } } } } if (maxIdx != -1) { // atomic add atomicAdd(input_grad + maxIdx, output_grad[index]); } } } template <typename PoolProcess, class T> class Pool3dFunctor<platform::GPUPlace, PoolProcess, T> { public: void operator()(const platform::DeviceContext& context, const framework::Tensor& input, framework::Tensor& output, std::vector<int>& ksize, std::vector<int>& strides, std::vector<int>& paddings, PoolProcess pool_process) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output.dims()[1]; const int output_depth = output.dims()[2]; const int output_height = output.dims()[3]; const int output_width = output.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); T* output_data = output.mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelPool3D< PoolProcess, T><<<grid, threads, 0, reinterpret_cast<const platform::CUDADeviceContext&>(context) .stream()>>>( nthreads, input_data, output_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, pool_process); } }; template <typename PoolProcess, class T> class Pool3dGradFunctor<platform::GPUPlace, PoolProcess, T> { public: void operator()(const platform::DeviceContext& context, const framework::Tensor& input, framework::Tensor& input_grad, const framework::Tensor& output, const framework::Tensor& output_grad, std::vector<int>& ksize, std::vector<int>& strides, std::vector<int>& paddings, PoolProcess pool_process) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output.dims()[1]; const int output_depth = output.dims()[2]; const int output_height = output.dims()[3]; const int output_width = output.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad.mutable_data<T>(context.GetPlace()); int nthreads = batch_size * input_channels * input_depth * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelPool3DGrad< PoolProcess, T><<<grid, 
threads, 0, reinterpret_cast<const platform::CUDADeviceContext&>(context) .stream()>>>( nthreads, input_data, output_data, output_grad_data, input_grad_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, pool_process); } }; template <class T> class MaxPool3dGradFunctor<platform::GPUPlace, T> { public: void operator()(const platform::DeviceContext& context, const framework::Tensor& input, framework::Tensor& input_grad, const framework::Tensor& output, const framework::Tensor& output_grad, std::vector<int>& ksize, std::vector<int>& strides, std::vector<int>& paddings) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; const int input_width = input.dims()[4]; const int output_channels = output.dims()[1]; const int output_depth = output.dims()[2]; const int output_height = output.dims()[3]; const int output_width = output.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; const int stride_depth = strides[0]; const int stride_height = strides[1]; const int stride_width = strides[2]; const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; const T* input_data = input.data<T>(); const T* output_data = output.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad.mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelMaxPool3DGrad< T><<<grid, threads, 0, reinterpret_cast<const platform::CUDADeviceContext&>(context) .stream()>>>( nthreads, input_data, output_data, output_grad_data, input_grad_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width); } }; template class MaxPool3dGradFunctor<platform::GPUPlace, float>; // template class MaxPool3dGradFunctor<platform::GPUPlace, double>; // The // 64-bit floating-point version of atomicAdd() is only supported by devices of // compute capability 6.x and higher. template class Pool3dFunctor<platform::GPUPlace, paddle::operators::math::MaxPool<float>, float>; template class Pool3dFunctor<platform::GPUPlace, paddle::operators::math::AvgPool<float>, float>; template class Pool3dGradFunctor< platform::GPUPlace, paddle::operators::math::MaxPoolGrad<float>, float>; template class Pool3dGradFunctor< platform::GPUPlace, paddle::operators::math::AvgPoolGrad<float>, float>; template class Pool3dFunctor<platform::GPUPlace, paddle::operators::math::MaxPool<double>, double>; template class Pool3dFunctor<platform::GPUPlace, paddle::operators::math::AvgPool<double>, double>; template class Pool3dGradFunctor< platform::GPUPlace, paddle::operators::math::MaxPoolGrad<double>, double>; template class Pool3dGradFunctor< platform::GPUPlace, paddle::operators::math::AvgPoolGrad<double>, double>; } // namespace math } // namespace operators } // namespace paddle
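// Usage sketch (hypothetical; the calling pooling operator is assumed to have
// already created the CUDA device context and allocated the input/output
// tensors with the expected NCHW shapes):
//   paddle::operators::math::Pool2dFunctor<
//       paddle::platform::GPUPlace,
//       paddle::operators::math::MaxPool<float>, float> pool2d;
//   std::vector<int> ksize{2, 2}, strides{2, 2}, paddings{0, 0};
//   pool2d(dev_ctx, input, output, ksize, strides, paddings,
//          paddle::operators::math::MaxPool<float>());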
1622d087794d279ed00c7f2953493560407c1699.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://aws.amazon.com/apache2.0/ or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "GpuTypes.h" #include "NNTypes.h" #include <limits> static __constant__ GpuData cData; void SetKLossGpuData() { hipError_t status; status = hipMemcpyToSymbol(cData, &(getGpu()._data), sizeof(GpuData)); RTERROR(status, "hipMemcpyToSymbol: SetKernelsGpuData copy to cData failed"); } void GetKLossGpuData() { hipError_t status; status = hipMemcpyFromSymbol(&(getGpu()._data), cData, sizeof(GpuData)); RTERROR(status, "hipMemcpyFromSymbol: SetKernelsGpuData copy From cData failed"); } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawL1Error_kernel(uint32_t position, NNFloat* pSparseWeight, NNFloat* pUnit, uint64_t stride, uint64_t size) { uint64_t pos = blockDim.x * blockIdx.x + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < size) { NNFloat w = (NNFloat)1.0; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = pUnit[pos]; error = w * fabsf(a); } REDUCEERROR(error) } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += w * (fabsf(a - (NNFloat)1.0) - fabsf(a)); pos1 += cData._warpSize; } } REDUCEERROR(error) } __global__ void LAUNCH_BOUNDS() kCalculateSparseOnlyNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += w * fabsf(a - (NNFloat)1.0); pos1 += cData._warpSize; } } REDUCEERROR(error) } NNFloat kCalculateSparseL1Error(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, bool bSparseIgnoreZero) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateSparseOnlyNonZeroL1Error_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseOnlyNonZeroL1Error_kernel"); } else { uint64_t size = (uint64_t)batch * (uint64_t)stride; uint32_t blocks = CalculateBlocks(size); hipLaunchKernelGGL(( kCalculateSparseRawL1Error_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, pUnit, stride, size); LAUNCHERROR("kCalculateSparseRawL1Error_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateSparseNonZeroL1Error_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseNonZeroL1Error_kernel"); } getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += w * (fabsf(a - (NNFloat)1.0) - fabsf(a)); pos1 += cData._warpSize; } } REDUCEERROR(error) } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseOnlyNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += w * fabsf(a - (NNFloat)1.0); pos1 += cData._warpSize; } } REDUCEERROR(error) } NNFloat kCalculateIndexedSparseL1Error(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, bool bSparseIgnoreZero) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateIndexedSparseOnlyNonZeroL1Error_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateIndexedSparseOnlyNonZeroL1Error_kernel"); } else { uint64_t size = (uint64_t)batch * (uint64_t)stride; uint32_t blocks = CalculateBlocks(size); hipLaunchKernelGGL(( kCalculateSparseRawL1Error_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, pUnit, stride, size); LAUNCHERROR("kCalculateSparseRawL1Error_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroL1Error_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateIndexedSparseNonZeroL1Error_kernel"); } getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogOnlyNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; error += w * fabsf(a - t); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; error += w * (fabsf(a - t) - fabsf(a)); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogOnlyNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); error += w * fabsf(a - t); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); error += w * (fabsf(a - t) - fabsf(a)); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogOnlyNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); error += w * fabsf(a - t); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); error += w * (fabsf(a - t) - fabsf(a)); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<typename T> NNFloat kCalculateSparseAnalogL1Error(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, bool bSparseIgnoreZero) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateSparseAnalogOnlyNonZeroL1Error_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateSparseAnalogOnlyNonZeroL1Error_kernel"); } else { uint64_t size = (uint64_t)batch * (uint64_t)stride; uint32_t blocks = CalculateBlocks(size); hipLaunchKernelGGL(( kCalculateSparseRawL1Error_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, pUnit, stride, size); LAUNCHERROR("kCalculateSparseRawL1Error_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroL1Error_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateSparseAnalogNonZeroL1Error_kernel"); } getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogOnlyNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; error += w * fabsf(a - t); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; error += w * (fabsf(a - t) - fabsf(a)); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogOnlyNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); error += w * fabsf(a - t); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); error += w * (fabsf(a - t) - fabsf(a)); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogOnlyNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); error += w * fabsf(a - t); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); error += w * (fabsf(a - t) - fabsf(a)); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<typename T> NNFloat kCalculateIndexedSparseAnalogL1Error(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, bool bSparseIgnoreZero) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateIndexedSparseAnalogOnlyNonZeroL1Error_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateSparseAnalogOnlyNonZeroL1Error_kernel"); } else { uint64_t size = (uint64_t)batch * (uint64_t)stride; uint32_t blocks = CalculateBlocks(size); hipLaunchKernelGGL(( kCalculateSparseRawL1Error_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, pUnit, stride, size); LAUNCHERROR("kCalculateSparseRawL1Error_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateIndexedSparseAnalogNonZeroL1Error_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroL1Error_kernel"); } getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawL2Error_kernel(uint32_t position, NNFloat* pSparseWeight, NNFloat* pUnit, uint32_t stride, uint64_t size) { uint64_t pos = blockDim.x * blockIdx.x + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < size) { NNFloat w = (NNFloat)0.5; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = pUnit[pos]; error = w * a * a; } REDUCEERROR(error) } __global__ void LAUNCH_BOUNDS() kCalculateSparseOnlyNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += w * ((a - (NNFloat)1.0) * (a - (NNFloat)1.0)); pos1 += cData._warpSize; } } REDUCEERROR(error) } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += w * ((a - (NNFloat)1.0) * (a - (NNFloat)1.0) - a * a); pos1 += cData._warpSize; } } REDUCEERROR(error) } NNFloat kCalculateSparseL2Error(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, bool bSparseIgnoreZero) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateSparseOnlyNonZeroL2Error_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseOnlyNonZeroL2Error_kernel"); } else { uint64_t size = batch * stride; uint32_t blocks = CalculateBlocks(size); hipLaunchKernelGGL(( kCalculateSparseRawL2Error_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, pUnit, stride, size); LAUNCHERROR("kCalculateSparseRawL2Error_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateSparseNonZeroL2Error_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseNonZeroL2Error_kernel"); } getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogOnlyNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = 
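// One warp is assigned per example: the global thread index divided by the warp size selects the example row.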
(blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; error += w * ((a - t) * (a - t)); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; error += w * ((a - t) * (a - t) - a * a); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogOnlyNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); error += w * ((a - t) * (a - t)); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); error += w * ((a - t) * (a - t) - a * a); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogOnlyNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); error += w * ((a - t) * (a - t)); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); error += w * ((a - t) * (a - t) - a * a); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<typename T> NNFloat kCalculateSparseAnalogL2Error(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, bool bSparseIgnoreZero) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateSparseAnalogOnlyNonZeroL2Error_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateSparseAnalogOnlyNonZeroL2Error_kernel"); } else { uint64_t size = batch * stride; uint32_t blocks = CalculateBlocks(size); hipLaunchKernelGGL(( kCalculateSparseRawL2Error_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, pUnit, stride, size); LAUNCHERROR("kCalculateSparseRawL2Error_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroL2Error_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateSparseAnalogNonZeroL2Error_kernel"); } getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseOnlyNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += w * ((a - (NNFloat)1.0) * (a - (NNFloat)1.0)); pos1 += cData._warpSize; } } REDUCEERROR(error) } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? 
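// The 0.5 factor yields the conventional (1/2) * (a - t)^2 form of the squared error.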
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += w * ((a - (NNFloat)1.0) * (a - (NNFloat)1.0) - a * a); pos1 += cData._warpSize; } } REDUCEERROR(error) } NNFloat kCalculateIndexedSparseL2Error(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, bool bSparseIgnoreZero) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateIndexedSparseOnlyNonZeroL2Error_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateIndexedSparseOnlyNonZeroL2Error_kernel"); } else { uint64_t size = batch * stride; uint32_t blocks = CalculateBlocks(size); hipLaunchKernelGGL(( kCalculateSparseRawL2Error_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, pUnit, stride, size); LAUNCHERROR("kCalculateSparseRawL2Error_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroL2Error_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateIndexedSparseNonZeroL2Error_kernel"); } getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogOnlyNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; error += w * ((a - t) * (a - t)); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? 
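// NonZero kernels subtract a * a to cancel the zero-target term that the Raw kernel already accumulated for these outputs.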
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; error += w * ((a - t) * (a - t) - a * a); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogOnlyNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); error += w * ((a - t) * (a - t)); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); error += w * ((a - t) * (a - t) - a * a); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogOnlyNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); error += w * ((a - t) * (a - t)); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); error += w * ((a - t) * (a - t) - a * a); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<typename T> NNFloat kCalculateIndexedSparseAnalogL2Error(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, bool bSparseIgnoreZero) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateIndexedSparseAnalogOnlyNonZeroL2Error_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateIndexedSparseAnalogOnlyNonZeroL2Error_kernel"); } else { uint64_t size = batch * stride; uint32_t blocks = CalculateBlocks(size); hipLaunchKernelGGL(( kCalculateSparseRawL2Error_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, pUnit, stride, size); LAUNCHERROR("kCalculateSparseRawL2Error_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateIndexedSparseAnalogNonZeroL2Error_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroL2Error_kernel"); } getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawL2HingeError_kernel(uint32_t position, NNFloat* pSparseWeight, NNFloat* pUnit, uint32_t stride, uint64_t size) { uint64_t pos = blockDim.x * blockIdx.x + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < size) { NNFloat w = (NNFloat)0.5; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? 
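// The Raw hinge pass treats every output as a zero target, so only positive activations incur error.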
cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = max((NNFloat)0.0, pUnit[pos]); error = w * a * a; } REDUCEERROR(error) } __global__ void LAUNCH_BOUNDS() kCalculateSparseOnlyNonZeroL2HingeError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat diff = min((NNFloat)0.0, a - (NNFloat)1.0); error += w * diff * diff; pos1 += cData._warpSize; } } REDUCEERROR(error) } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroL2HingeError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat diff = min((NNFloat)0.0, a - (NNFloat)1.0); a = max((NNFloat)0.0, a); error += w * (diff * diff - a * a); pos1 += cData._warpSize; } } REDUCEERROR(error) } NNFloat kCalculateSparseL2HingeError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, bool bSparseIgnoreZero) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateSparseOnlyNonZeroL2HingeError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseOnlyNonZeroL2HingeError_kernel"); } else { uint64_t size = batch * stride; uint32_t blocks = CalculateBlocks(size); hipLaunchKernelGGL(( kCalculateSparseRawL2HingeError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, pUnit, stride, size); LAUNCHERROR("kCalculateSparseRawL2HingeError_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateSparseNonZeroL2HingeError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseNonZeroL2HingeError_kernel"); } getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogOnlyNonZeroL2HingeError_kernel(uint32_t position, uint32_t batch, uint32_t stride, 
NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f , diff) : max((NNFloat)0.0, diff); error += w * diff * diff; pos1 += cData._warpSize; } } REDUCEERROR(error) } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroL2HingeError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat diff = a - fabsf(t); a = max((NNFloat)0.0, a); diff = (t > (T)0.0) ? min((NNFloat)0.0f , diff) : max((NNFloat)0.0, diff); error += w * (diff * diff - a * a); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogOnlyNonZeroL2HingeError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - t; diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); error += w * diff * diff; pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroL2HingeError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - t; diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); a = max((NNFloat)0.0, a); error += w * (diff * diff - a * a); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogOnlyNonZeroL2HingeError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf((NNFloat)t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f , diff) : max((NNFloat)0.0, diff); error += w * diff * diff; pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroL2HingeError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); a = max((NNFloat)0.0, a); diff = (t > (NNFloat)0.0) ? 
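// Hinge clamping: positive targets penalize only activations below |t|, non-positive targets only activations above |t|.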
min((NNFloat)0.0f , diff) : max((NNFloat)0.0, diff); error += w * (diff * diff - a * a); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<typename T> NNFloat kCalculateSparseAnalogL2HingeError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, bool bSparseIgnoreZero) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateSparseAnalogOnlyNonZeroL2HingeError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateSparseAnalogOnlyNonZeroL2HingeError_kernel"); } else { uint64_t size = batch * stride; uint32_t blocks = CalculateBlocks(size); hipLaunchKernelGGL(( kCalculateSparseRawL2HingeError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, pUnit, stride, size); LAUNCHERROR("kCalculateSparseRawL2HingeError_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroL2HingeError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateSparseAnalogNonZeroL2HingeError_kernel"); } getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseOnlyNonZeroL2HingeError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat diff = min((NNFloat)0.0, pUnit[pos2] - (NNFloat)1.0); error += w * diff * diff; pos1 += cData._warpSize; } } REDUCEERROR(error) } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroL2HingeError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat diff = min((NNFloat)0.0, a - (NNFloat)1.0); a = max((NNFloat)0.0, a); error += w * (diff * diff - a * a); pos1 += cData._warpSize; } } REDUCEERROR(error) } NNFloat kCalculateIndexedSparseL2HingeError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, bool bSparseIgnoreZero) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateIndexedSparseOnlyNonZeroL2HingeError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateIndexedSparseOnlyNonZeroL2HingeError_kernel"); } else { uint64_t size = batch * stride; uint32_t blocks = CalculateBlocks(size); hipLaunchKernelGGL(( kCalculateSparseRawL2HingeError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, pUnit, stride, size); LAUNCHERROR("kCalculateSparseRawL2HingeError_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroL2HingeError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateIndexedSparseNonZeroL2HingeError_kernel"); } getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogOnlyNonZeroL2HingeError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f , diff) : max((NNFloat)0.0, diff); error += w * diff * diff; pos1 += cData._warpSize; } } REDUCEERROR(error) } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroL2HingeError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat diff = a - fabsf(t); a = max((NNFloat)0.0, a); diff = (t > (T)0.0) ? min((NNFloat)0.0f , diff) : max((NNFloat)0.0, diff); error += w * (diff * diff - a * a); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogOnlyNonZeroL2HingeError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - t; diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); error += w * diff * diff; pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroL2HingeError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - t; diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); a = max((NNFloat)0.0, a); error += w * (diff * diff - a * a); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogOnlyNonZeroL2HingeError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f , diff) : max((NNFloat)0.0, diff); error += w * diff * diff; pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroL2HingeError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); a = max((NNFloat)0.0, a); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f , diff) : max((NNFloat)0.0, diff); error += w * (diff * diff - a * a); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<typename T> NNFloat kCalculateIndexedSparseAnalogL2HingeError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, bool bSparseIgnoreZero) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateIndexedSparseAnalogOnlyNonZeroL2HingeError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateIndexedSparseAnalogOnlyNonZeroL2HingeError_kernel"); } else { uint64_t size = batch * stride; uint32_t blocks = CalculateBlocks(size); hipLaunchKernelGGL(( kCalculateSparseRawL2HingeError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, pUnit, stride, size); LAUNCHERROR("kCalculateSparseRawL2HingeError_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateIndexedSparseAnalogNonZeroL2HingeError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroL2HingeError_kernel"); } getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawCrossEntropyError_kernel(uint32_t position, NNFloat* pSparseWeight, NNFloat* pUnit, uint32_t stride, uint64_t size) { uint64_t pos = blockDim.x * blockIdx.x + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < size) { NNFloat w = (NNFloat)1.0; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? 
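// The Raw cross-entropy pass assumes a zero target and contributes -log(1 - a); MIN_ERROR keeps the log argument away from zero.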
cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = pUnit[pos]; error = -w * log(max(MIN_ERROR, (NNFloat)1.0 - a)); } REDUCEERROR(error) } __global__ void LAUNCH_BOUNDS() kCalculateSparseOnlyNonZeroCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += -w * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } /* LOOPY while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += -t * log(max(MIN_ERROR, a)) - ((NNFloat)1.0 - t) * log(max(MIN_ERROR, (NNFloat)1.0 - a)); pos1 += cData._warpSize; } */ REDUCEERROR(error) } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? 
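// The NonZero pass adds -log(a) for the one-valued target and adds back +log(1 - a) to cancel the Raw pass term at this output.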
pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += w * (-log(max(MIN_ERROR, a)) + log(max(MIN_ERROR, (NNFloat)1.0 - a))); pos1 += cData._warpSize; } /* LOOPY while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += -t * log(max(MIN_ERROR, a)) + t * log(max(MIN_ERROR, (NNFloat)1.0 - a)); // -t * log(a) - (1.0 - t) * log(1.0 - a) + log(1.0 - a) pos1 += cData._warpSize; } */ } REDUCEERROR(error) } NNFloat kCalculateSparseCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, bool bSparseIgnoreZero) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateSparseOnlyNonZeroCrossEntropyError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseOnlyNonZeroCrossEntropyError_kernel"); } else { uint64_t size = (uint64_t)batch * (uint64_t)stride; uint32_t blocks = CalculateBlocks(size); hipLaunchKernelGGL(( kCalculateSparseRawCrossEntropyError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, pUnit, stride, size); LAUNCHERROR("kCalculateSparseRawCrossEntropyError_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateSparseNonZeroCrossEntropyError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseNonZeroCrossEntropyError_kernel"); } getGpu()._pbAccumulator->Download(); //printf("Error is %f\n", (double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseOnlyNonZeroCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += -w * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCEERROR(error) } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? 
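// Indexed variants add one more indirection: the (optionally shuffled) position is mapped through pIndex to locate the example's sparse row.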
cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += w * (-log(max(MIN_ERROR, a)) + log(max(MIN_ERROR, (NNFloat)1.0 - a))); pos1 += cData._warpSize; } } REDUCEERROR(error) } NNFloat kCalculateIndexedSparseCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, bool bSparseIgnoreZero) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateIndexedSparseOnlyNonZeroCrossEntropyError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseOnlyNonZeroCrossEntropyError_kernel"); } else { uint64_t size = (uint64_t)batch * (uint64_t)stride; uint32_t blocks = CalculateBlocks(size); hipLaunchKernelGGL(( kCalculateSparseRawCrossEntropyError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, pUnit, stride, size); LAUNCHERROR("kCalculateSparseRawCrossEntropyError_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroCrossEntropyError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateIndexedSparseNonZeroCrossEntropyError_kernel"); } getGpu()._pbAccumulator->Download(); //printf("Error is %f\n", (double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } __global__ void LAUNCH_BOUNDS() kCalculateSparseMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos]; uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight == NULL) ? 
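// Without explicit weights, the multinomial form normalizes each example by its number of nonzero targets.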
(NNFloat)1.0 / (NNFloat)(end - pos1) : pSparseWeight[dpos]; pos1 += threadIdx.x & cData._warpMask; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += -w * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCEERROR(error) } NNFloat kCalculateSparseMultinomialCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateSparseMultinomialCrossEntropyError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseMultinomialCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); //printf("Error is %f\n", (double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos]; uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight == NULL) ? (NNFloat)1.0 / (NNFloat)(end - pos1) : pSparseWeight[dpos]; pos1 += threadIdx.x & cData._warpMask; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += -w * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCEERROR(error) } NNFloat kCalculateIndexedSparseMultinomialCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateIndexedSparseMultinomialCrossEntropyError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateIndexedSparseMultinomialCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); //printf("Error is %f\n", (double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; error += w * (-t * log(max(MIN_ERROR, a))); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); error += w * (-t * log(max(MIN_ERROR, a))); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? 
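// 8-bit analog targets are rescaled on the fly: unsigned char values by 1/256 and signed char values by 1/128.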
pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); error += w * (-t * log(max(MIN_ERROR, a))); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<typename T> NNFloat kCalculateSparseAnalogMultinomialCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateSparseAnalogMultinomialCrossEntropyError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateSparseAnalogMultinomialCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; error += w * (-t * log(max(MIN_ERROR, a))); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0;
        uint64_t offset = pos * stride;
        while (pos1 < end)
        {
            uint64_t pos2 = offset + pSparseIndex[pos1];
            NNFloat a = pUnit[pos2];
            NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
            error += w * (-t * log(max(MIN_ERROR, a)));
            pos1 += cData._warpSize;
        }
    }
    REDUCEERROR(error)
}

template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, char* pSparseData)
{
    uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize;
    NNFloat error = (NNFloat)0.0;
    if (pos < batch)
    {
        uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
        uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
        uint64_t end = pSparseEnd[dpos];
        NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0;
        uint64_t offset = pos * stride;
        while (pos1 < end)
        {
            uint64_t pos2 = offset + pSparseIndex[pos1];
            NNFloat a = pUnit[pos2];
            NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
            error += w * (-t * log(max(MIN_ERROR, a)));
            pos1 += cData._warpSize;
        }
    }
    REDUCEERROR(error)
}

template<typename T> NNFloat kCalculateIndexedSparseAnalogMultinomialCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData)
{
    hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t));
    uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize);
    hipLaunchKernelGGL(( kCalculateIndexedSparseAnalogMultinomialCrossEntropyError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData);
    LAUNCHERROR("kCalculateIndexedSparseAnalogMultinomialCrossEntropyError_kernel");
    getGpu()._pbAccumulator->Download();
    return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE);
}

__global__ void LAUNCH_BOUNDS() kCalculateSparseRawScaledMarginalCrossEntropyError_kernel(uint32_t position, NNFloat* pSparseWeight, NNFloat* pUnit, uint32_t stride, uint64_t size)
{
    uint64_t pos = blockDim.x * blockIdx.x + threadIdx.x;
    NNFloat error = (NNFloat)0.0;
    if (pos < size)
    {
        NNFloat w = cData._SMCE_zeroScale;
        if (pSparseWeight != NULL)
        {
            // Recover the batch row of this output before applying the optional
            // shuffle so that pSparseWeight is indexed per example, not per unit.
            uint64_t dpos = (pos / stride) + position;
            dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos;
            w *= pSparseWeight[dpos];
        }
        NNFloat a = pUnit[pos];
        if (a > cData._SMCE_zeroTarget)
            error = -w * log(max(MIN_ERROR, (NNFloat)1.0 - a));
    }
    REDUCEERROR(error)
}

__global__ void LAUNCH_BOUNDS() kCalculateSparseOnlyNonZeroScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight)
{
    uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize;
    NNFloat error = (NNFloat)0.0;
    if (pos < batch)
    {
        uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
        uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
        uint64_t end = pSparseEnd[dpos];
        NNFloat w = cData._SMCE_oneScale * ((pSparseWeight != NULL) ?
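/* "OnlyNonZero" path (bSparseIgnoreZero == true): only the one-target penalty
   is accumulated, scaled by _SMCE_oneScale and the optional per-example weight;
   outputs at zero targets are not charged at all. */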
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; if (a < cData._SMCE_oneTarget) error += -w * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } /* LOOPY } else { while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; if (a < cData._SMCE_oneTarget) error += cData._SMCE_oneScale * (-t * log(max(MIN_ERROR, a)) - ((NNFloat)1.0 - t) * log(max(MIN_ERROR, (NNFloat)1.0 - a))); pos1 += cData._warpSize; } } */ } REDUCEERROR(error) } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; if (a > cData._SMCE_zeroTarget) { error += w * cData._SMCE_zeroScale * log(max(MIN_ERROR, (NNFloat)1.0 - a)); } if (a < cData._SMCE_oneTarget) { error += -w * cData._SMCE_oneScale * log(max(MIN_ERROR, a)); } pos1 += cData._warpSize; } /* LOOPY } else { while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; if (a > cData._SMCE_zeroTarget) { error += cData._SMCE_zeroScale * log(max(MIN_ERROR, (NNFloat)1.0 - a)); } if (a < cData._SMCE_oneTarget) { error += cData._SMCE_oneScale * (-t * log(max(MIN_ERROR, a)) - ((NNFloat)1.0 - t) * log(max(MIN_ERROR, (NNFloat)1.0 - a))); } pos1 += cData._warpSize; } } */ } REDUCEERROR(error) } NNFloat kCalculateSparseScaledMarginalCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, bool bSparseIgnoreZero) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateSparseOnlyNonZeroScaledMarginalCrossEntropyError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseOnlyNonZeroScaledMarginalCrossEntropyError_kernel"); } else { uint64_t size = (uint64_t)batch * (uint64_t)stride; uint32_t blocks = CalculateBlocks(size); hipLaunchKernelGGL(( kCalculateSparseRawScaledMarginalCrossEntropyError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, pUnit, stride, size); LAUNCHERROR("kCalculateSparseRawScaledMarginalCrossEntropyError_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateSparseNonZeroScaledMarginalCrossEntropyError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseNonZeroScaledMarginalCrossEntropyError_kernel"); } getGpu()._pbAccumulator->Download(); //printf("Error is %f\n", (double)(getGpu()._pbAccumulator->_pSysData[0]) * 
ONEOVERERRORSCALE); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseOnlyNonZeroScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._SMCE_oneScale * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; if (a < cData._SMCE_oneTarget) error += -w * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCEERROR(error) } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; if (a > cData._SMCE_zeroTarget) { error += w * cData._SMCE_zeroScale * log(max(MIN_ERROR, (NNFloat)1.0 - a)); } if (a < cData._SMCE_oneTarget) { error += -w * cData._SMCE_oneScale * log(max(MIN_ERROR, a)); } pos1 += cData._warpSize; } } REDUCEERROR(error) } NNFloat kCalculateIndexedSparseScaledMarginalCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, bool bSparseIgnoreZero) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateIndexedSparseOnlyNonZeroScaledMarginalCrossEntropyError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseOnlyNonZeroScaledMarginalCrossEntropyError_kernel"); } else { uint64_t size = (uint64_t)batch * (uint64_t)stride; uint32_t blocks = CalculateBlocks(size); hipLaunchKernelGGL(( kCalculateSparseRawScaledMarginalCrossEntropyError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, pUnit, stride, size); LAUNCHERROR("kCalculateSparseRawScaledMarginalCrossEntropyError_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroScaledMarginalCrossEntropyError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); 
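/* Two-pass scheme when zeros are not ignored: the raw kernel above charges every
   output against the zero target, and this non-zero kernel adds the one-target
   penalty while re-adding +zeroScale*log(1-a) at the sparse positions so the raw
   contribution is cancelled there.  Both passes accumulate into the same
   fixed-point accumulator, which is downloaded and rescaled below. */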
LAUNCHERROR("kCalculateIndexedSparseNonZeroScaledMarginalCrossEntropyError_kernel"); } getGpu()._pbAccumulator->Download(); //printf("Error is %f\n", (double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawDataScaledMarginalCrossEntropyError_kernel(NNFloat* pUnit, uint64_t size) { uint64_t pos = blockDim.x * blockIdx.x + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < size) { NNFloat a = pUnit[pos]; if (a > cData._SMCE_zeroTarget) { error = -cData._SMCE_zeroScale * log(max(MIN_ERROR, (NNFloat)1.0 - a)); } } REDUCEERROR(error) } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroDataScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; if (a > cData._SMCE_zeroTarget) { error += cData._SMCE_zeroScale * log(max(MIN_ERROR, (NNFloat)1.0 - a)); } if (a < cData._SMCE_oneTarget) { error += -cData._SMCE_oneScale * t * log(max(MIN_ERROR, a)); } pos1 += cData._warpSize; } } REDUCEERROR(error) } template<typename T> NNFloat kCalculateSparseDataScaledMarginalCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, bool bSparseIgnoreZero) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (!bSparseIgnoreZero) { uint64_t size = (uint64_t)batch * (uint64_t)stride; uint32_t blocks = CalculateBlocks(size); hipLaunchKernelGGL(( kCalculateSparseRawDataScaledMarginalCrossEntropyError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, pUnit, size); LAUNCHERROR("kCalculateSparseRawDataScaledMarginalCrossEntropyError_kernel"); } uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateSparseNonZeroDataScaledMarginalCrossEntropyError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseData); LAUNCHERROR("kCalculateSparseNonZeroDataScaledMarginalCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroDataScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; if (a > cData._SMCE_zeroTarget) { error += cData._SMCE_zeroScale * log(max(MIN_ERROR, (NNFloat)1.0 - a)); } if (a < cData._SMCE_oneTarget) { error += -cData._SMCE_oneScale * t * log(max(MIN_ERROR, a)); } pos1 += cData._warpSize; } } REDUCEERROR(error) } template<typename T> NNFloat kCalculateIndexedSparseDataScaledMarginalCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, bool bSparseIgnoreZero) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (!bSparseIgnoreZero) { uint64_t size = (uint64_t)batch * (uint64_t)stride; uint32_t blocks = CalculateBlocks(size); hipLaunchKernelGGL(( kCalculateSparseRawDataScaledMarginalCrossEntropyError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, pUnit, size); LAUNCHERROR("kCalculateSparseRawDataScaledMarginalCrossEntropyError_kernel"); } uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroDataScaledMarginalCrossEntropyError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseData); LAUNCHERROR("kCalculateIndexedSparseNonZeroDataScaledMarginalCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } __global__ void LAUNCH_BOUNDS() kCalculateSparseMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos]; uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._SMCE_oneScale * ((pSparseWeight == NULL) ? 
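/* Default multinomial weight is the reciprocal of this example's non-zero count,
   so every example contributes equally no matter how many labels it has; an
   explicit per-example sparse weight overrides that default. */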
(NNFloat)1.0 / (NNFloat)(end - pos1) : pSparseWeight[dpos]); pos1 += threadIdx.x & cData._warpMask; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; if (a < cData._SMCE_oneTarget) error += -w * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCEERROR(error) } NNFloat kCalculateSparseMultinomialScaledMarginalCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateSparseNonZeroScaledMarginalCrossEntropyError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseMultinomialScaledMarginalCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); //printf("Error is %f\n", (double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos]; uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._SMCE_oneScale * ((pSparseWeight == NULL) ? 
(NNFloat)1.0 / (NNFloat)(end - pos1) : pSparseWeight[dpos]); pos1 += threadIdx.x & cData._warpMask; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; if (a < cData._SMCE_oneTarget) error += -w * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCEERROR(error) } NNFloat kCalculateIndexedSparseMultinomialScaledMarginalCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroScaledMarginalCrossEntropyError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateIndexedSparseMultinomialScaledMarginalCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); //printf("Error is %f\n", (double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._SMCE_oneScale * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; if (a < cData._SMCE_oneTarget) error += -w * t * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._SMCE_oneScale * ((pSparseWeight != NULL) ? 
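/* Analog variants weight the -log(a) term by the stored target value t.
   unsigned char data is rescaled by 1/256 and signed char data by 1/128,
   matching the convention used by the other error kernels in this file. */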
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = pSparseData[pos1] * (NNFloat)(1.0 / 256.0); if (a < cData._SMCE_oneTarget) error += -w * t * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._SMCE_oneScale * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = pSparseData[pos1] * (NNFloat)(1.0 / 128.0); if (a < cData._SMCE_oneTarget) error += -w * t * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<typename T> NNFloat kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); //printf("Error is %f\n", (double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._SMCE_oneScale * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; if (a < cData._SMCE_oneTarget) error += -w * t * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._SMCE_oneScale * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = pSparseData[pos1] * (NNFloat)(1.0 / 256.0); if (a < cData._SMCE_oneTarget) error += -w * t * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._SMCE_oneScale * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = pSparseData[pos1] * (NNFloat)(1.0 / 128.0); if (a < cData._SMCE_oneTarget) error += -w * t * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<typename T> NNFloat kCalculateIndexedSparseAnalogMultinomialScaledMarginalCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); hipLaunchKernelGGL(( kCalculateIndexedSparseAnalogMultinomialScaledMarginalCrossEntropyError_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateIndexedSparseAnalogMultinomialScaledMarginalCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); //printf("Error is %f\n", (double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateL1Error_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; T t = pData[dOffset + pos]; error = fabsf(a - t); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateL1Error_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); error = fabsf(a - t); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateL1Error_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
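/* Dense error kernels use a 2-D grid: blockIdx.x picks the example while
   (blockIdx.y, threadIdx.x) walk the stride.  uOffset addresses the network
   output row and dOffset the (optionally shuffled) target row. */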
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); error = fabsf(a - t); } REDUCEERROR(error) } template<typename T> NNFloat kCalculateL1Error(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, T* pData) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); hipLaunchKernelGGL(( kCalculateL1Error_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, stride, pUnit, pData); LAUNCHERROR("kCalculateL1Error_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedL1Error_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; T t = pData[dOffset + pos]; error = fabsf(a - t); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedL1Error_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); error = fabsf(a - t); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedL1Error_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); error = fabsf(a - t); } REDUCEERROR(error) } template<typename T> NNFloat kCalculateIndexedL1Error(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, T* pData) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); hipLaunchKernelGGL(( kCalculateIndexedL1Error_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, stride, pUnit, pIndex, pData); LAUNCHERROR("kCalculateL1Error_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateL2Error_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
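/* L2 error is accumulated as 0.5 * (a - t)^2 so that its derivative with
   respect to the output is simply (a - t). */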
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; T t = pData[dOffset + pos]; error = (NNFloat)0.5 * (a - t) * (a - t); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateL2Error_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); error = (NNFloat)0.5 * (a - t) * (a - t); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateL2Error_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); error = (NNFloat)0.5 * (a - t) * (a - t); } REDUCEERROR(error) } template<typename T> NNFloat kCalculateL2Error(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, T* pData) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); hipLaunchKernelGGL(( kCalculateL2Error_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, stride, pUnit, pData); LAUNCHERROR("kCalculateL2Error_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedL2Error_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; T t = pData[dOffset + pos]; error = (NNFloat)0.5 * (a - t) * (a - t); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedL2Error_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); error = (NNFloat)0.5 * (a - t) * (a - t); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedL2Error_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); error = (NNFloat)0.5 * (a - t) * (a - t); } REDUCEERROR(error) } template<typename T> NNFloat kCalculateIndexedL2Error(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, T* pData) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); hipLaunchKernelGGL(( kCalculateIndexedL2Error_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, stride, pUnit, pIndex, pData); LAUNCHERROR("kCalculateIndexedL2Error_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateL2HingeError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; T t = pData[dOffset + pos]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f , diff) : max((NNFloat)0.0, diff); error += (NNFloat)0.5 * diff * diff; } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateL2HingeError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - t; diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); error = (NNFloat)0.5 * diff * diff; } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateL2HingeError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? 
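/* L2 hinge keeps only the "wrong side" part of the difference: for positive
   targets the shortfall below the target (diff clamped to <= 0), otherwise the
   excess above it (diff clamped to >= 0), which is then squared and halved. */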
min((NNFloat)0.0f , diff) : max((NNFloat)0.0, diff); error += (NNFloat)0.5 * diff * diff; } REDUCEERROR(error) } template<typename T> NNFloat kCalculateL2HingeError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, T* pData) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); hipLaunchKernelGGL(( kCalculateL2HingeError_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, stride, pUnit, pData); LAUNCHERROR("kCalculateL2HingeError_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedL2HingeError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; T t = pData[dOffset + pos]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f , diff) : max((NNFloat)0.0, diff); error += (NNFloat)0.5 * diff * diff; } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedL2HingeError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - t; diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); error = (NNFloat)0.5 * diff * diff; } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedL2HingeError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? 
min((NNFloat)0.0f , diff) : max((NNFloat)0.0, diff);
        error += (NNFloat)0.5 * diff * diff;
    }
    REDUCEERROR(error)
}

template<typename T> NNFloat kCalculateIndexedL2HingeError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, T* pData)
{
    hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t));
    dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock);
    hipLaunchKernelGGL(( kCalculateIndexedL2HingeError_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, stride, pUnit, pIndex, pData);
    LAUNCHERROR("kCalculateIndexedL2HingeError_kernel");
    getGpu()._pbAccumulator->Download();
    return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE);
}

template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateHingeError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, T* pData)
{
    // Calculate initial offsets: one block per example; both the output row and
    // the (optionally shuffled) target row are scaled by the layer stride.
    pUnit += blockIdx.x * stride;
    pData += (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;

    // Calculate loss
    uint32_t pos = threadIdx.x;
    NNFloat loss = (NNFloat)0.0;
    while (pos < stride)
    {
        NNFloat t = pData[pos];
        NNFloat y = pUnit[pos];
        loss += max((NNFloat)0.0, (NNFloat)1.0 - t * y);
        pos += blockDim.x;
        //printf("HL %d %f %f %f\n", blockIdx.x, t, y, loss);
    }
    REDUCEERROR(loss)
}

template<> __global__ void LAUNCH_BOUNDS() kCalculateHingeError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, unsigned char* pData)
{
    // Calculate initial offsets
    pUnit += blockIdx.x * stride;
    pData += (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;

    // Calculate loss
    uint32_t pos = threadIdx.x;
    NNFloat loss = (NNFloat)0.0;
    while (pos < stride)
    {
        NNFloat t = (NNFloat)pData[pos] * (NNFloat)(1.0 / 256.0);
        NNFloat y = pUnit[pos];
        loss += max((NNFloat)0.0, (NNFloat)1.0 - t * y);
        pos += blockDim.x;
    }
    REDUCEERROR(loss)
}

template<> __global__ void LAUNCH_BOUNDS() kCalculateHingeError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, char* pData)
{
    // Calculate initial offsets
    pUnit += blockIdx.x * stride;
    pData += (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;

    // Calculate loss
    uint32_t pos = threadIdx.x;
    NNFloat loss = (NNFloat)0.0;
    while (pos < stride)
    {
        NNFloat t = (NNFloat)pData[pos] * (NNFloat)(1.0 / 128.0);
        NNFloat y = pUnit[pos];
        loss += max((NNFloat)0.0, (NNFloat)1.0 - t * y);
        pos += blockDim.x;
    }
    REDUCEERROR(loss)
}

template<typename T> NNFloat kCalculateHingeError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, T* pData)
{
    hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t));
    unsigned long threads = max(32, min(stride, 128));
    hipLaunchKernelGGL(( kCalculateHingeError_kernel), dim3(batch), dim3(threads), 0, 0, position, stride, pUnit, pData);
    LAUNCHERROR("kCalculateHingeError_kernel");
    getGpu()._pbAccumulator->Download();
    return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE);
}

template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedHingeError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, T* pData)
{
    // Calculate initial offsets
    pUnit += blockIdx.x * stride;
    pData += pIndex[cData._bShuffleIndices ?
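/* Hinge kernels launch one block per example with up to 128 threads striding
   across the outputs; each output contributes max(0, 1 - t * y), so targets are
   expected to be signed (+1/-1 style). */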
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; // Calculate loss uint32_t pos = threadIdx.x; NNFloat loss = (NNFloat)0.0; while (pos < stride) { NNFloat t = pData[pos]; NNFloat y = pUnit[pos]; loss += max((NNFloat)0.0, (NNFloat)1.0 - t * y); pos += blockDim.x; } REDUCEERROR(loss) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedHingeError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, unsigned char* pData) { // Calculate initial offsets pUnit += blockIdx.x * stride; pData += pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; // Calculate loss uint32_t pos = threadIdx.x; NNFloat loss = (NNFloat)0.0; while (pos < stride) { NNFloat t = pData[pos] * (NNFloat)(1.0 / 256.0); NNFloat y = pUnit[pos]; loss += max((NNFloat)0.0, (NNFloat)1.0 - t * y); pos += blockDim.x; } REDUCEERROR(loss) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedHingeError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, char* pData) { // Calculate initial offsets pUnit += blockIdx.x * stride; pData += pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; // Calculate loss uint32_t pos = threadIdx.x; NNFloat loss = (NNFloat)0.0; while (pos < stride) { NNFloat t = (NNFloat)pData[pos] * (NNFloat)(1.0 / 128.0); NNFloat y = pUnit[pos]; loss += max((NNFloat)0.0, (NNFloat)1.0 - t * y); pos += blockDim.x; } REDUCEERROR(loss) } template<typename T> NNFloat kCalculateIndexedHingeError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, T* pData) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); unsigned long threads = max(32, min(stride, 128)); hipLaunchKernelGGL(( kCalculateIndexedHingeError_kernel), dim3(batch), dim3(threads), 0, 0, position, stride, pUnit, pIndex, pData); LAUNCHERROR("kCalculateIndexedHingeError_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; T t = pData[dOffset + pos]; error = -t * log(max(MIN_ERROR, a)) - ( (NNFloat)1.0 - t) * log(max(MIN_ERROR, (NNFloat)1.0 - a)); //printf("%d %llu %f %f %f\n", position, pos, a, t, error); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
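/* Dense binary cross entropy: -t*log(a) - (1-t)*log(1-a), with both logarithms
   clamped at MIN_ERROR so saturated outputs cannot produce infinities. */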
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); error = -t * log(max(MIN_ERROR, a)) - ( (NNFloat)1.0 - t) * log(max(MIN_ERROR, (NNFloat)1.0 - a)); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); error = -t * log(max(MIN_ERROR, a)) - ( (NNFloat)1.0 - t) * log(max(MIN_ERROR, (NNFloat)1.0 - a)); //printf("%d %llu %f %f %f\n", position, pos, a, t, error); } REDUCEERROR(error) } template<typename T> NNFloat kCalculateCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, T* pData) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); hipLaunchKernelGGL(( kCalculateCrossEntropyError_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, stride, pUnit, pData); LAUNCHERROR("kCalculateCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; T t = pData[dOffset + pos]; error = -t * log(max(MIN_ERROR, a)) - ( (NNFloat)1.0 - t) * log(max(MIN_ERROR, (NNFloat)1.0 - a)); //printf("%d %llu %f %f %f\n", position, pos, a, t, error); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); error = -t * log(max(MIN_ERROR, a)) - ( (NNFloat)1.0 - t) * log(max(MIN_ERROR, (NNFloat)1.0 - a)); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); error = -t * log(max(MIN_ERROR, a)) - ( (NNFloat)1.0 - t) * log(max(MIN_ERROR, (NNFloat)1.0 - a)); //printf("%d %llu %f %f %f\n", position, pos, a, t, error); } REDUCEERROR(error) } template<typename T> NNFloat kCalculateIndexedCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, T* pData) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); hipLaunchKernelGGL(( kCalculateIndexedCrossEntropyError_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, stride, pUnit, pIndex, pData); LAUNCHERROR("kCalculateIndexedCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; T t = pData[dOffset + pos]; error = -t * log(max(MIN_ERROR, a)); //printf("%d %llu %f %f %f\n", position, pos, a, t, error); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); error = -t * log(max(MIN_ERROR, a)); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
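/* Multinomial (softmax-style) cross entropy keeps only the -t*log(a) term; the
   (1-t)*log(1-a) term is dropped because the outputs are treated as a single
   probability distribution rather than independent binary units. */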
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); error = -t * log(max(MIN_ERROR, a)); //printf("%d %llu %f %f %f\n", position, pos, a, t, error); } REDUCEERROR(error) } template<typename T> NNFloat kCalculateMultinomialCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, T* pData) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); hipLaunchKernelGGL(( kCalculateMultinomialCrossEntropyError_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, stride, pUnit, pData); LAUNCHERROR("kCalculateMultinomialCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; T t = pData[dOffset + pos]; error = -t * log(max(MIN_ERROR, a)); //printf("%d %llu %f %f %f\n", position, pos, a, t, error); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); error = -t * log(max(MIN_ERROR, a)); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); error = -t * log(max(MIN_ERROR, a)); //printf("%d %llu %f %f %f\n", position, pos, a, t, error); } REDUCEERROR(error) } template<typename T> NNFloat kCalculateIndexedMultinomialCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, T* pData) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); hipLaunchKernelGGL(( kCalculateIndexedMultinomialCrossEntropyError_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, stride, pUnit, pIndex, pData); LAUNCHERROR("kCalculateIndexedMultinomialCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } // HERE 2 template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; T t = pData[dOffset + pos]; if (((t == (T)1.0) && (a < cData._SMCE_oneTarget)) || ((t == (T)0.0) && (a > cData._SMCE_zeroTarget))) error = -t * cData._SMCE_oneScale * log(max(MIN_ERROR, a)) - ( (NNFloat)1.0 - t) * cData._SMCE_zeroScale * log(max(MIN_ERROR, (NNFloat)1.0 - a)); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); if (((t == (NNFloat)1.0) && (a < cData._SMCE_oneTarget)) || ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))) error = -t * cData._SMCE_oneScale * log(max(MIN_ERROR, a)) - ((NNFloat)1.0 - t) * cData._SMCE_zeroScale * log(max(MIN_ERROR, (NNFloat)1.0 - a)); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); if (((t == (NNFloat)1.0) && (a < cData._SMCE_oneTarget)) || ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))) error = -t * cData._SMCE_oneScale * log(max(MIN_ERROR, a)) - ((NNFloat)1.0 - t) * cData._SMCE_zeroScale * log(max(MIN_ERROR, (NNFloat)1.0 - a)); //printf("%d %llu %f %f %f\n", position, pos, a, t, error); } REDUCEERROR(error) } template<typename T> NNFloat kCalculateScaledMarginalCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, T* pData) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); hipLaunchKernelGGL(( kCalculateScaledMarginalCrossEntropyError_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, stride, pUnit, pData); LAUNCHERROR("kCalculateScaledMarginalCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; T t = pData[dOffset + pos]; if (((t == (T)1.0) && (a < cData._SMCE_oneTarget)) || ((t == (T)0.0) && (a > cData._SMCE_zeroTarget))) error = -t * cData._SMCE_oneScale * log(max(MIN_ERROR, a)) - ( (NNFloat)1.0 - t) * cData._SMCE_zeroScale * log(max(MIN_ERROR, (NNFloat)1.0 - a)); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); if (((t == (NNFloat)1.0) && (a < cData._SMCE_oneTarget)) || ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))) error = -t * cData._SMCE_oneScale * log(max(MIN_ERROR, a)) - ((NNFloat)1.0 - t) * cData._SMCE_zeroScale * log(max(MIN_ERROR, (NNFloat)1.0 - a)); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); if (((t == (NNFloat)1.0) && (a < cData._SMCE_oneTarget)) || ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))) error = -t * cData._SMCE_oneScale * log(max(MIN_ERROR, a)) - ((NNFloat)1.0 - t) * cData._SMCE_zeroScale * log(max(MIN_ERROR, (NNFloat)1.0 - a)); //printf("%d %llu %f %f %f\n", position, pos, a, t, error); } REDUCEERROR(error) } template<typename T> NNFloat kCalculateIndexedScaledMarginalCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, T* pData) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); hipLaunchKernelGGL(( kCalculateIndexedScaledMarginalCrossEntropyError_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, stride, pUnit, pIndex, pData); LAUNCHERROR("kCalculateIndexedScaledMarginalCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; T t = pData[dOffset + pos]; if ((t != (T)0.0) && (a < cData._SMCE_oneTarget)) error = -t * cData._SMCE_oneScale * log(max(MIN_ERROR, a)); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); if ((t != (NNFloat)0.0) && (a < cData._SMCE_oneTarget)) error = -t * cData._SMCE_oneScale * log(max(MIN_ERROR, a)); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); if ((t != (NNFloat)0.0) && (a < cData._SMCE_oneTarget)) error = -t * cData._SMCE_oneScale * log(max(MIN_ERROR, a)); } REDUCEERROR(error) } template<typename T> NNFloat kCalculateMultinomialScaledMarginalCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, T* pData) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); hipLaunchKernelGGL(( kCalculateMultinomialScaledMarginalCrossEntropyError_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, stride, pUnit, pData); LAUNCHERROR("kCalculateMultinomialScaledMarginalCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; T t = pData[dOffset + pos]; if ((t != (T)0.0) && (a < cData._SMCE_oneTarget)) error = -t * cData._SMCE_oneScale * log(max(MIN_ERROR, a)); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); if ((t != (NNFloat)0.0) && (a < cData._SMCE_oneTarget)) error = -t * cData._SMCE_oneScale * log(max(MIN_ERROR, a)); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); if ((t != (NNFloat)0.0) && (a < cData._SMCE_oneTarget)) error = -t * cData._SMCE_oneScale * log(max(MIN_ERROR, a)); } REDUCEERROR(error) } template<typename T> NNFloat kCalculateIndexedMultinomialScaledMarginalCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, T* pData) { hipMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); hipLaunchKernelGGL(( kCalculateIndexedMultinomialScaledMarginalCrossEntropyError_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, stride, pUnit, pIndex, pData); LAUNCHERROR("kCalculateIndexedMultinomialScaledMarginalCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } // Instantiates allowable templated functions so we can hide the implementations here // instead of in the header file because we're mixing CUDA and C++ and that's // a migraine headache in the making otherwise. #define EXPLICITLY_INSTANTIATE_KERNELS(T) \ template NNFloat kCalculateL1Error<T>(uint32_t, uint32_t, uint32_t, NNFloat*, T*); \ template NNFloat kCalculateIndexedL1Error<T>(uint32_t, uint32_t, uint32_t, NNFloat*, uint32_t*, T*); \ template NNFloat kCalculateL2Error<T>(uint32_t, uint32_t, uint32_t, NNFloat*, T*); \ template NNFloat kCalculateIndexedL2Error<T>(uint32_t, uint32_t, uint32_t, NNFloat*, uint32_t*, T*); \ template NNFloat kCalculateL2HingeError<T>(uint32_t, uint32_t, uint32_t, NNFloat*, T*); \ template NNFloat kCalculateIndexedL2HingeError<T>(uint32_t, uint32_t, uint32_t, NNFloat*, uint32_t*, T*); \ template NNFloat kCalculateCrossEntropyError<T>(uint32_t, uint32_t, uint32_t, NNFloat*, T*); \ template NNFloat kCalculateIndexedCrossEntropyError<T>(uint32_t, uint32_t, uint32_t, NNFloat*, uint32_t*, T*); \ template NNFloat kCalculateScaledMarginalCrossEntropyError<T>(uint32_t, uint32_t, uint32_t, NNFloat*, T*); \ template NNFloat kCalculateIndexedScaledMarginalCrossEntropyError<T>(uint32_t, uint32_t, uint32_t, NNFloat*, uint32_t*, T*); \ template NNFloat kCalculateMultinomialCrossEntropyError<T>(uint32_t, uint32_t, uint32_t, NNFloat*, T*); \ template NNFloat kCalculateIndexedMultinomialCrossEntropyError<T>(uint32_t, uint32_t, uint32_t, NNFloat*, uint32_t*, T*); \ template NNFloat kCalculateMultinomialScaledMarginalCrossEntropyError<T>(uint32_t, uint32_t, uint32_t, NNFloat*, T*); \ template NNFloat kCalculateIndexedMultinomialScaledMarginalCrossEntropyError<T>(uint32_t, uint32_t, uint32_t, NNFloat*, uint32_t*, T*); \ template NNFloat kCalculateHingeError<T>(uint32_t, uint32_t, uint32_t, NNFloat*, T*); \ template NNFloat kCalculateIndexedHingeError<T>(uint32_t, uint32_t, uint32_t, NNFloat*, uint32_t*, T*); \ template NNFloat kCalculateSparseAnalogL1Error<T>(uint32_t, uint32_t, uint32_t, NNFloat*, uint64_t*, uint64_t*, uint32_t*, NNFloat* pSparseWeight, T*, bool); \ template NNFloat kCalculateIndexedSparseAnalogL1Error<T>(uint32_t, uint32_t, uint32_t, NNFloat*, uint32_t*, uint64_t*, uint64_t*, uint32_t*, NNFloat* pSparseWeight, T*, bool); \ template NNFloat kCalculateSparseAnalogL2Error<T>(uint32_t, uint32_t, uint32_t, NNFloat*, uint64_t*, uint64_t*, uint32_t*, NNFloat* pSparseWeight, T*, bool); \ template NNFloat 
kCalculateIndexedSparseAnalogL2Error<T>(uint32_t, uint32_t, uint32_t, NNFloat*, uint32_t*, uint64_t*, uint64_t*, uint32_t*, NNFloat* pSparseWeight, T*, bool); \ template NNFloat kCalculateSparseAnalogL2HingeError<T>(uint32_t, uint32_t, uint32_t, NNFloat*, uint64_t*, uint64_t*, uint32_t*, NNFloat* pSparseWeight, T*, bool); \ template NNFloat kCalculateIndexedSparseAnalogL2HingeError<T>(uint32_t, uint32_t, uint32_t, NNFloat*, uint32_t*, uint64_t*, uint64_t*, uint32_t*, NNFloat* pSparseWeight, T*, bool); \ template NNFloat kCalculateSparseAnalogMultinomialCrossEntropyError<T>(uint32_t, uint32_t, uint32_t, NNFloat*, uint64_t*, uint64_t*, uint32_t*, NNFloat* pSparseWeight, T*); \ template NNFloat kCalculateIndexedSparseAnalogMultinomialCrossEntropyError<T>(uint32_t, uint32_t, uint32_t, NNFloat*, uint32_t*, uint64_t*, uint64_t*, uint32_t*, NNFloat* pSparseWeight, T*); \ template NNFloat kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError<T>(uint32_t, uint32_t, uint32_t, NNFloat*, uint64_t*, uint64_t*, uint32_t*, NNFloat* pSparseWeight, T*); \ template NNFloat kCalculateIndexedSparseAnalogMultinomialScaledMarginalCrossEntropyError<T>(uint32_t, uint32_t, uint32_t, NNFloat*, uint32_t*, uint64_t*, uint64_t*, uint32_t*, NNFloat* pSparseWeight, T*); \ template NNFloat kCalculateSparseDataScaledMarginalCrossEntropyError<T>(uint32_t, uint32_t, uint32_t, NNFloat*, uint64_t*, uint64_t*, uint32_t*, T*, bool); \ template NNFloat kCalculateIndexedSparseDataScaledMarginalCrossEntropyError<T>(uint32_t, uint32_t, uint32_t, NNFloat*, uint32_t*, uint64_t*, uint64_t*, uint32_t*, T*, bool); \ /**/ EXPLICITLY_INSTANTIATE_KERNELS(NNFloat) EXPLICITLY_INSTANTIATE_KERNELS(double) EXPLICITLY_INSTANTIATE_KERNELS(unsigned char) EXPLICITLY_INSTANTIATE_KERNELS(char) EXPLICITLY_INSTANTIATE_KERNELS(uint32_t) EXPLICITLY_INSTANTIATE_KERNELS(uint64_t) EXPLICITLY_INSTANTIATE_KERNELS(int32_t) EXPLICITLY_INSTANTIATE_KERNELS(int64_t)
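// The EXPLICITLY_INSTANTIATE_KERNELS macro above works through explicit template instantiation:
// the templated launcher definitions stay in this translation unit and the header only needs to
// carry declarations. A minimal, self-contained sketch of the same pattern follows; kExampleScale
// is a hypothetical name used purely for illustration and is not referenced anywhere else.
//
//     // header: declaration only
//     // template<typename T> NNFloat kExampleScale(T value);
template<typename T> NNFloat kExampleScale(T value)
{
    // Definition hidden in this translation unit, just like the loss launchers above.
    return (NNFloat)value * (NNFloat)0.5;
}
// One explicit instantiation per supported data type, mirroring EXPLICITLY_INSTANTIATE_KERNELS.
template NNFloat kExampleScale<NNFloat>(NNFloat);
template NNFloat kExampleScale<unsigned char>(unsigned char);
template NNFloat kExampleScale<char>(char);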
1622d087794d279ed00c7f2953493560407c1699.cu
/* Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://aws.amazon.com/apache2.0/ or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "GpuTypes.h" #include "NNTypes.h" #include <limits> static __constant__ GpuData cData; void SetKLossGpuData() { cudaError_t status; status = cudaMemcpyToSymbol(cData, &(getGpu()._data), sizeof(GpuData)); RTERROR(status, "cudaMemcpyToSymbol: SetKernelsGpuData copy to cData failed"); } void GetKLossGpuData() { cudaError_t status; status = cudaMemcpyFromSymbol(&(getGpu()._data), cData, sizeof(GpuData)); RTERROR(status, "cudaMemcpyFromSymbol: SetKernelsGpuData copy From cData failed"); } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawL1Error_kernel(uint32_t position, NNFloat* pSparseWeight, NNFloat* pUnit, uint64_t stride, uint64_t size) { uint64_t pos = blockDim.x * blockIdx.x + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < size) { NNFloat w = (NNFloat)1.0; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = pUnit[pos]; error = w * fabsf(a); } REDUCEERROR(error) } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += w * (fabsf(a - (NNFloat)1.0) - fabsf(a)); pos1 += cData._warpSize; } } REDUCEERROR(error) } __global__ void LAUNCH_BOUNDS() kCalculateSparseOnlyNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += w * fabsf(a - (NNFloat)1.0); pos1 += cData._warpSize; } } REDUCEERROR(error) } NNFloat kCalculateSparseL1Error(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, bool bSparseIgnoreZero) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateSparseOnlyNonZeroL1Error_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseOnlyNonZeroL1Error_kernel"); } else { uint64_t size = (uint64_t)batch * (uint64_t)stride; uint32_t blocks = CalculateBlocks(size); kCalculateSparseRawL1Error_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, pSparseWeight, pUnit, stride, size); LAUNCHERROR("kCalculateSparseRawL1Error_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateSparseNonZeroL1Error_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseNonZeroL1Error_kernel"); } getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += w * (fabsf(a - (NNFloat)1.0) - fabsf(a)); pos1 += cData._warpSize; } } REDUCEERROR(error) } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseOnlyNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += w * fabsf(a - (NNFloat)1.0); pos1 += cData._warpSize; } } REDUCEERROR(error) } NNFloat kCalculateIndexedSparseL1Error(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, bool bSparseIgnoreZero) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateIndexedSparseOnlyNonZeroL1Error_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateIndexedSparseOnlyNonZeroL1Error_kernel"); } else { uint64_t size = (uint64_t)batch * (uint64_t)stride; uint32_t blocks = CalculateBlocks(size); kCalculateSparseRawL1Error_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, pSparseWeight, pUnit, stride, size); LAUNCHERROR("kCalculateSparseRawL1Error_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateIndexedSparseNonZeroL1Error_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateIndexedSparseNonZeroL1Error_kernel"); } getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogOnlyNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; error += w * fabsf(a - t); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; error += w * (fabsf(a - t) - fabsf(a)); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogOnlyNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); error += w * fabsf(a - t); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); error += w * (fabsf(a - t) - fabsf(a)); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogOnlyNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); error += w * fabsf(a - t); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); error += w * (fabsf(a - t) - fabsf(a)); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<typename T> NNFloat kCalculateSparseAnalogL1Error(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, bool bSparseIgnoreZero) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateSparseAnalogOnlyNonZeroL1Error_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateSparseAnalogOnlyNonZeroL1Error_kernel"); } else { uint64_t size = (uint64_t)batch * (uint64_t)stride; uint32_t blocks = CalculateBlocks(size); kCalculateSparseRawL1Error_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, pSparseWeight, pUnit, stride, size); LAUNCHERROR("kCalculateSparseRawL1Error_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateSparseAnalogNonZeroL1Error_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateSparseAnalogNonZeroL1Error_kernel"); } getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogOnlyNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; error += w * fabsf(a - t); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; error += w * (fabsf(a - t) - fabsf(a)); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogOnlyNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); error += w * fabsf(a - t); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); error += w * (fabsf(a - t) - fabsf(a)); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogOnlyNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); error += w * fabsf(a - t); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroL1Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); error += w * (fabsf(a - t) - fabsf(a)); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<typename T> NNFloat kCalculateIndexedSparseAnalogL1Error(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, bool bSparseIgnoreZero) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateIndexedSparseAnalogOnlyNonZeroL1Error_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateSparseAnalogOnlyNonZeroL1Error_kernel"); } else { uint64_t size = (uint64_t)batch * (uint64_t)stride; uint32_t blocks = CalculateBlocks(size); kCalculateSparseRawL1Error_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, pSparseWeight, pUnit, stride, size); LAUNCHERROR("kCalculateSparseRawL1Error_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateIndexedSparseAnalogNonZeroL1Error_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroL1Error_kernel"); } getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawL2Error_kernel(uint32_t position, NNFloat* pSparseWeight, NNFloat* pUnit, uint32_t stride, uint64_t size) { uint64_t pos = blockDim.x * blockIdx.x + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < size) { NNFloat w = (NNFloat)0.5; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = pUnit[pos]; error = w * a * a; } REDUCEERROR(error) } __global__ void LAUNCH_BOUNDS() kCalculateSparseOnlyNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += w * ((a - (NNFloat)1.0) * (a - (NNFloat)1.0)); pos1 += cData._warpSize; } } REDUCEERROR(error) } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += w * ((a - (NNFloat)1.0) * (a - (NNFloat)1.0) - a * a); pos1 += cData._warpSize; } } REDUCEERROR(error) } NNFloat kCalculateSparseL2Error(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, bool bSparseIgnoreZero) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateSparseOnlyNonZeroL2Error_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseOnlyNonZeroL2Error_kernel"); } else { uint64_t size = batch * stride; uint32_t blocks = CalculateBlocks(size); kCalculateSparseRawL2Error_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, pSparseWeight, pUnit, stride, size); LAUNCHERROR("kCalculateSparseRawL2Error_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateSparseNonZeroL2Error_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseNonZeroL2Error_kernel"); } getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogOnlyNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { 
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; error += w * ((a - t) * (a - t)); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; error += w * ((a - t) * (a - t) - a * a); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogOnlyNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); error += w * ((a - t) * (a - t)); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); error += w * ((a - t) * (a - t) - a * a); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogOnlyNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); error += w * ((a - t) * (a - t)); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); error += w * ((a - t) * (a - t) - a * a); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<typename T> NNFloat kCalculateSparseAnalogL2Error(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, bool bSparseIgnoreZero) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateSparseAnalogOnlyNonZeroL2Error_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateSparseAnalogOnlyNonZeroL2Error_kernel"); } else { uint64_t size = batch * stride; uint32_t blocks = CalculateBlocks(size); kCalculateSparseRawL2Error_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, pSparseWeight, pUnit, stride, size); LAUNCHERROR("kCalculateSparseRawL2Error_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateSparseAnalogNonZeroL2Error_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateSparseAnalogNonZeroL2Error_kernel"); } getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseOnlyNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += w * ((a - (NNFloat)1.0) * (a - (NNFloat)1.0)); pos1 += cData._warpSize; } } REDUCEERROR(error) } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += w * ((a - (NNFloat)1.0) * (a - (NNFloat)1.0) - a * a); pos1 += cData._warpSize; } } REDUCEERROR(error) } NNFloat kCalculateIndexedSparseL2Error(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, bool bSparseIgnoreZero) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateIndexedSparseOnlyNonZeroL2Error_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateIndexedSparseOnlyNonZeroL2Error_kernel"); } else { uint64_t size = batch * stride; uint32_t blocks = CalculateBlocks(size); kCalculateSparseRawL2Error_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, pSparseWeight, pUnit, stride, size); LAUNCHERROR("kCalculateSparseRawL2Error_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateIndexedSparseNonZeroL2Error_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateIndexedSparseNonZeroL2Error_kernel"); } getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogOnlyNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; error += w * ((a - t) * (a - t)); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; error += w * ((a - t) * (a - t) - a * a); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogOnlyNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); error += w * ((a - t) * (a - t)); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); error += w * ((a - t) * (a - t) - a * a); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogOnlyNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); error += w * ((a - t) * (a - t)); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroL2Error_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); error += w * ((a - t) * (a - t) - a * a); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<typename T> NNFloat kCalculateIndexedSparseAnalogL2Error(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, bool bSparseIgnoreZero) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateIndexedSparseAnalogOnlyNonZeroL2Error_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateIndexedSparseAnalogOnlyNonZeroL2Error_kernel"); } else { uint64_t size = batch * stride; uint32_t blocks = CalculateBlocks(size); kCalculateSparseRawL2Error_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, pSparseWeight, pUnit, stride, size); LAUNCHERROR("kCalculateSparseRawL2Error_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateIndexedSparseAnalogNonZeroL2Error_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroL2Error_kernel"); } getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawL2HingeError_kernel(uint32_t position, NNFloat* pSparseWeight, NNFloat* pUnit, uint32_t stride, uint64_t size) { uint64_t pos = blockDim.x * blockIdx.x + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < size) { NNFloat w = (NNFloat)0.5; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = max((NNFloat)0.0, pUnit[pos]); error = w * a * a; } REDUCEERROR(error) } __global__ void LAUNCH_BOUNDS() kCalculateSparseOnlyNonZeroL2HingeError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat diff = min((NNFloat)0.0, a - (NNFloat)1.0); error += w * diff * diff; pos1 += cData._warpSize; } } REDUCEERROR(error) } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroL2HingeError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat diff = min((NNFloat)0.0, a - (NNFloat)1.0); a = max((NNFloat)0.0, a); error += w * (diff * diff - a * a); pos1 += cData._warpSize; } } REDUCEERROR(error) } NNFloat kCalculateSparseL2HingeError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, bool bSparseIgnoreZero) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateSparseOnlyNonZeroL2HingeError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseOnlyNonZeroL2HingeError_kernel"); } else { uint64_t size = batch * stride; uint32_t blocks = CalculateBlocks(size); kCalculateSparseRawL2HingeError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, pSparseWeight, pUnit, stride, size); LAUNCHERROR("kCalculateSparseRawL2HingeError_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateSparseNonZeroL2HingeError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseNonZeroL2HingeError_kernel"); } getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogOnlyNonZeroL2HingeError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, T* 
pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f , diff) : max((NNFloat)0.0, diff); error += w * diff * diff; pos1 += cData._warpSize; } } REDUCEERROR(error) }
template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroL2HingeError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat diff = a - fabsf(t); a = max((NNFloat)0.0, a); diff = (t > (T)0.0) ? min((NNFloat)0.0f , diff) : max((NNFloat)0.0, diff); error += w * (diff * diff - a * a); pos1 += cData._warpSize; } } REDUCEERROR(error) }
template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogOnlyNonZeroL2HingeError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - t; diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); error += w * diff * diff; pos1 += cData._warpSize; } } REDUCEERROR(error) }
template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroL2HingeError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - t; diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); a = max((NNFloat)0.0, a); error += w * (diff * diff - a * a); pos1 += cData._warpSize; } } REDUCEERROR(error) }
template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogOnlyNonZeroL2HingeError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf((NNFloat)t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f , diff) : max((NNFloat)0.0, diff); error += w * diff * diff; pos1 += cData._warpSize; } } REDUCEERROR(error) }
template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroL2HingeError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); a = max((NNFloat)0.0, a); diff = (t > (NNFloat)0.0) ?
min((NNFloat)0.0f , diff) : max((NNFloat)0.0, diff); error += w * (diff * diff - a * a); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<typename T> NNFloat kCalculateSparseAnalogL2HingeError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, bool bSparseIgnoreZero) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateSparseAnalogOnlyNonZeroL2HingeError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateSparseAnalogOnlyNonZeroL2HingeError_kernel"); } else { uint64_t size = batch * stride; uint32_t blocks = CalculateBlocks(size); kCalculateSparseRawL2HingeError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, pSparseWeight, pUnit, stride, size); LAUNCHERROR("kCalculateSparseRawL2HingeError_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateSparseAnalogNonZeroL2HingeError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateSparseAnalogNonZeroL2HingeError_kernel"); } getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseOnlyNonZeroL2HingeError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat diff = min((NNFloat)0.0, pUnit[pos2] - (NNFloat)1.0); error += w * diff * diff; pos1 += cData._warpSize; } } REDUCEERROR(error) } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroL2HingeError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat diff = min((NNFloat)0.0, a - (NNFloat)1.0); a = max((NNFloat)0.0, a); error += w * (diff * diff - a * a); pos1 += cData._warpSize; } } REDUCEERROR(error) } NNFloat kCalculateIndexedSparseL2HingeError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, bool bSparseIgnoreZero) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateIndexedSparseOnlyNonZeroL2HingeError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateIndexedSparseOnlyNonZeroL2HingeError_kernel"); } else { uint64_t size = batch * stride; uint32_t blocks = CalculateBlocks(size); kCalculateSparseRawL2HingeError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, pSparseWeight, pUnit, stride, size); LAUNCHERROR("kCalculateSparseRawL2HingeError_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateIndexedSparseNonZeroL2HingeError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateIndexedSparseNonZeroL2HingeError_kernel"); } getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogOnlyNonZeroL2HingeError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f , diff) : max((NNFloat)0.0, diff); error += w * diff * diff; pos1 += cData._warpSize; } } REDUCEERROR(error) } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroL2HingeError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat diff = a - fabsf(t); a = max((NNFloat)0.0, a); diff = (t > (T)0.0) ? min((NNFloat)0.0f , diff) : max((NNFloat)0.0, diff); error += w * (diff * diff - a * a); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogOnlyNonZeroL2HingeError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - t; diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); error += w * diff * diff; pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroL2HingeError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - t; diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); a = max((NNFloat)0.0, a); error += w * (diff * diff - a * a); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogOnlyNonZeroL2HingeError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f , diff) : max((NNFloat)0.0, diff); error += w * diff * diff; pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroL2HingeError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (NNFloat)0.5 * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); a = max((NNFloat)0.0, a); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f , diff) : max((NNFloat)0.0, diff); error += w * (diff * diff - a * a); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<typename T> NNFloat kCalculateIndexedSparseAnalogL2HingeError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, bool bSparseIgnoreZero) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateIndexedSparseAnalogOnlyNonZeroL2HingeError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateIndexedSparseAnalogOnlyNonZeroL2HingeError_kernel"); } else { uint64_t size = batch * stride; uint32_t blocks = CalculateBlocks(size); kCalculateSparseRawL2HingeError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, pSparseWeight, pUnit, stride, size); LAUNCHERROR("kCalculateSparseRawL2HingeError_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateIndexedSparseAnalogNonZeroL2HingeError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroL2HingeError_kernel"); } getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawCrossEntropyError_kernel(uint32_t position, NNFloat* pSparseWeight, NNFloat* pUnit, uint32_t stride, uint64_t size) { uint64_t pos = blockDim.x * blockIdx.x + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < size) { NNFloat w = (NNFloat)1.0; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = pUnit[pos]; error = -w * log(max(MIN_ERROR, (NNFloat)1.0 - a)); } REDUCEERROR(error) } __global__ void LAUNCH_BOUNDS() kCalculateSparseOnlyNonZeroCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += -w * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } /* LOOPY while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += -t * log(max(MIN_ERROR, a)) - ((NNFloat)1.0 - t) * log(max(MIN_ERROR, (NNFloat)1.0 - a)); pos1 += cData._warpSize; } */ REDUCEERROR(error) } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += w * (-log(max(MIN_ERROR, a)) + log(max(MIN_ERROR, (NNFloat)1.0 - a))); pos1 += cData._warpSize; } /* LOOPY while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += -t * log(max(MIN_ERROR, a)) + t * log(max(MIN_ERROR, (NNFloat)1.0 - a)); // -t * log(a) - (1.0 - t) * log(1.0 - a) + log(1.0 - a) pos1 += cData._warpSize; } */ } REDUCEERROR(error) } NNFloat kCalculateSparseCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, bool bSparseIgnoreZero) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateSparseOnlyNonZeroCrossEntropyError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseOnlyNonZeroCrossEntropyError_kernel"); } else { uint64_t size = (uint64_t)batch * (uint64_t)stride; uint32_t blocks = CalculateBlocks(size); kCalculateSparseRawCrossEntropyError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, pSparseWeight, pUnit, stride, size); LAUNCHERROR("kCalculateSparseRawCrossEntropyError_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateSparseNonZeroCrossEntropyError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseNonZeroCrossEntropyError_kernel"); } getGpu()._pbAccumulator->Download(); //printf("Error is %f\n", (double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseOnlyNonZeroCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += -w * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCEERROR(error) } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += w * (-log(max(MIN_ERROR, a)) + log(max(MIN_ERROR, (NNFloat)1.0 - a))); pos1 += cData._warpSize; } } REDUCEERROR(error) } NNFloat kCalculateIndexedSparseCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, bool bSparseIgnoreZero) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateIndexedSparseOnlyNonZeroCrossEntropyError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseOnlyNonZeroCrossEntropyError_kernel"); } else { uint64_t size = (uint64_t)batch * (uint64_t)stride; uint32_t blocks = CalculateBlocks(size); kCalculateSparseRawCrossEntropyError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, pSparseWeight, pUnit, stride, size); LAUNCHERROR("kCalculateSparseRawCrossEntropyError_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateIndexedSparseNonZeroCrossEntropyError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateIndexedSparseNonZeroCrossEntropyError_kernel"); } getGpu()._pbAccumulator->Download(); //printf("Error is %f\n", (double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } __global__ void LAUNCH_BOUNDS() kCalculateSparseMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos]; uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight == NULL) ? 
(NNFloat)1.0 / (NNFloat)(end - pos1) : pSparseWeight[dpos]; pos1 += threadIdx.x & cData._warpMask; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += -w * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCEERROR(error) } NNFloat kCalculateSparseMultinomialCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateSparseMultinomialCrossEntropyError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseMultinomialCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); //printf("Error is %f\n", (double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos]; uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight == NULL) ? (NNFloat)1.0 / (NNFloat)(end - pos1) : pSparseWeight[dpos]; pos1 += threadIdx.x & cData._warpMask; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; error += -w * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCEERROR(error) } NNFloat kCalculateIndexedSparseMultinomialCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateIndexedSparseMultinomialCrossEntropyError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateIndexedSparseMultinomialCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); //printf("Error is %f\n", (double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; error += w * (-t * log(max(MIN_ERROR, a))); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); error += w * (-t * log(max(MIN_ERROR, a))); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); error += w * (-t * log(max(MIN_ERROR, a))); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<typename T> NNFloat kCalculateSparseAnalogMultinomialCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateSparseAnalogMultinomialCrossEntropyError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateSparseAnalogMultinomialCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; error += w * (-t * log(max(MIN_ERROR, a))); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); error += w * (-t * log(max(MIN_ERROR, a))); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); error += w * (-t * log(max(MIN_ERROR, a))); pos1 += cData._warpSize; } } REDUCEERROR(error) }
template<typename T> NNFloat kCalculateIndexedSparseAnalogMultinomialCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateIndexedSparseAnalogMultinomialCrossEntropyError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateIndexedSparseAnalogMultinomialCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); }
__global__ void LAUNCH_BOUNDS() kCalculateSparseRawScaledMarginalCrossEntropyError_kernel(uint32_t position, NNFloat* pSparseWeight, NNFloat* pUnit, uint32_t stride, uint64_t size) { uint64_t pos = blockDim.x * blockIdx.x + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < size) { NNFloat w = cData._SMCE_zeroScale; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = pUnit[pos]; if (a > cData._SMCE_zeroTarget) error = -w * log(max(MIN_ERROR, (NNFloat)1.0 - a)); } REDUCEERROR(error) }
__global__ void LAUNCH_BOUNDS() kCalculateSparseOnlyNonZeroScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._SMCE_oneScale * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; if (a < cData._SMCE_oneTarget) error += -w * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } /* LOOPY } else { while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; if (a < cData._SMCE_oneTarget) error += cData._SMCE_oneScale * (-t * log(max(MIN_ERROR, a)) - ((NNFloat)1.0 - t) * log(max(MIN_ERROR, (NNFloat)1.0 - a))); pos1 += cData._warpSize; } } */ } REDUCEERROR(error) }
__global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ?
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; if (a > cData._SMCE_zeroTarget) { error += w * cData._SMCE_zeroScale * log(max(MIN_ERROR, (NNFloat)1.0 - a)); } if (a < cData._SMCE_oneTarget) { error += -w * cData._SMCE_oneScale * log(max(MIN_ERROR, a)); } pos1 += cData._warpSize; } /* LOOPY } else { while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; if (a > cData._SMCE_zeroTarget) { error += cData._SMCE_zeroScale * log(max(MIN_ERROR, (NNFloat)1.0 - a)); } if (a < cData._SMCE_oneTarget) { error += cData._SMCE_oneScale * (-t * log(max(MIN_ERROR, a)) - ((NNFloat)1.0 - t) * log(max(MIN_ERROR, (NNFloat)1.0 - a))); } pos1 += cData._warpSize; } } */ } REDUCEERROR(error) } NNFloat kCalculateSparseScaledMarginalCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, bool bSparseIgnoreZero) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateSparseOnlyNonZeroScaledMarginalCrossEntropyError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseOnlyNonZeroScaledMarginalCrossEntropyError_kernel"); } else { uint64_t size = (uint64_t)batch * (uint64_t)stride; uint32_t blocks = CalculateBlocks(size); kCalculateSparseRawScaledMarginalCrossEntropyError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, pSparseWeight, pUnit, stride, size); LAUNCHERROR("kCalculateSparseRawScaledMarginalCrossEntropyError_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateSparseNonZeroScaledMarginalCrossEntropyError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseNonZeroScaledMarginalCrossEntropyError_kernel"); } getGpu()._pbAccumulator->Download(); //printf("Error is %f\n", (double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseOnlyNonZeroScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._SMCE_oneScale * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; if (a < cData._SMCE_oneTarget) error += -w * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCEERROR(error) } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; if (a > cData._SMCE_zeroTarget) { error += w * cData._SMCE_zeroScale * log(max(MIN_ERROR, (NNFloat)1.0 - a)); } if (a < cData._SMCE_oneTarget) { error += -w * cData._SMCE_oneScale * log(max(MIN_ERROR, a)); } pos1 += cData._warpSize; } } REDUCEERROR(error) } NNFloat kCalculateIndexedSparseScaledMarginalCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, bool bSparseIgnoreZero) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (bSparseIgnoreZero) { uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateIndexedSparseOnlyNonZeroScaledMarginalCrossEntropyError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseOnlyNonZeroScaledMarginalCrossEntropyError_kernel"); } else { uint64_t size = (uint64_t)batch * (uint64_t)stride; uint32_t blocks = CalculateBlocks(size); kCalculateSparseRawScaledMarginalCrossEntropyError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, pSparseWeight, pUnit, stride, size); LAUNCHERROR("kCalculateSparseRawScaledMarginalCrossEntropyError_kernel"); blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateIndexedSparseNonZeroScaledMarginalCrossEntropyError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateIndexedSparseNonZeroScaledMarginalCrossEntropyError_kernel"); } getGpu()._pbAccumulator->Download(); //printf("Error is %f\n", (double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawDataScaledMarginalCrossEntropyError_kernel(NNFloat* pUnit, uint64_t size) { uint64_t pos = blockDim.x * blockIdx.x + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < size) { NNFloat a = pUnit[pos]; if (a > cData._SMCE_zeroTarget) { error = -cData._SMCE_zeroScale * log(max(MIN_ERROR, (NNFloat)1.0 - a)); } } REDUCEERROR(error) } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroDataScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* 
pSparseEnd, uint32_t* pSparseIndex, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; if (a > cData._SMCE_zeroTarget) { error += cData._SMCE_zeroScale * log(max(MIN_ERROR, (NNFloat)1.0 - a)); } if (a < cData._SMCE_oneTarget) { error += -cData._SMCE_oneScale * t * log(max(MIN_ERROR, a)); } pos1 += cData._warpSize; } } REDUCEERROR(error) } template<typename T> NNFloat kCalculateSparseDataScaledMarginalCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, bool bSparseIgnoreZero) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (!bSparseIgnoreZero) { uint64_t size = (uint64_t)batch * (uint64_t)stride; uint32_t blocks = CalculateBlocks(size); kCalculateSparseRawDataScaledMarginalCrossEntropyError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(pUnit, size); LAUNCHERROR("kCalculateSparseRawDataScaledMarginalCrossEntropyError_kernel"); } uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateSparseNonZeroDataScaledMarginalCrossEntropyError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseData); LAUNCHERROR("kCalculateSparseNonZeroDataScaledMarginalCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroDataScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; if (a > cData._SMCE_zeroTarget) { error += cData._SMCE_zeroScale * log(max(MIN_ERROR, (NNFloat)1.0 - a)); } if (a < cData._SMCE_oneTarget) { error += -cData._SMCE_oneScale * t * log(max(MIN_ERROR, a)); } pos1 += cData._warpSize; } } REDUCEERROR(error) } template<typename T> NNFloat kCalculateIndexedSparseDataScaledMarginalCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, bool bSparseIgnoreZero) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); if (!bSparseIgnoreZero) { uint64_t size = (uint64_t)batch * (uint64_t)stride; uint32_t blocks = CalculateBlocks(size); kCalculateSparseRawDataScaledMarginalCrossEntropyError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(pUnit, size); LAUNCHERROR("kCalculateSparseRawDataScaledMarginalCrossEntropyError_kernel"); } uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateIndexedSparseNonZeroDataScaledMarginalCrossEntropyError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseData); LAUNCHERROR("kCalculateIndexedSparseNonZeroDataScaledMarginalCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } __global__ void LAUNCH_BOUNDS() kCalculateSparseMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos]; uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._SMCE_oneScale * ((pSparseWeight == NULL) ? 
(NNFloat)1.0 / (NNFloat)(end - pos1) : pSparseWeight[dpos]); pos1 += threadIdx.x & cData._warpMask; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; if (a < cData._SMCE_oneTarget) error += -w * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCEERROR(error) } NNFloat kCalculateSparseMultinomialScaledMarginalCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateSparseNonZeroScaledMarginalCrossEntropyError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseMultinomialScaledMarginalCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); //printf("Error is %f\n", (double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos]; uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._SMCE_oneScale * ((pSparseWeight == NULL) ? (NNFloat)1.0 / (NNFloat)(end - pos1) : pSparseWeight[dpos]); pos1 += threadIdx.x & cData._warpMask; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; if (a < cData._SMCE_oneTarget) error += -w * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCEERROR(error) } NNFloat kCalculateIndexedSparseMultinomialScaledMarginalCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateIndexedSparseNonZeroScaledMarginalCrossEntropyError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateIndexedSparseMultinomialScaledMarginalCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); //printf("Error is %f\n", (double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._SMCE_oneScale * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; if (a < cData._SMCE_oneTarget) error += -w * t * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._SMCE_oneScale * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = pSparseData[pos1] * (NNFloat)(1.0 / 256.0); if (a < cData._SMCE_oneTarget) error += -w * t * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._SMCE_oneScale * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = pSparseData[pos1] * (NNFloat)(1.0 / 128.0); if (a < cData._SMCE_oneTarget) error += -w * t * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<typename T> NNFloat kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); //printf("Error is %f\n", (double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._SMCE_oneScale * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; if (a < cData._SMCE_oneTarget) error += -w * t * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._SMCE_oneScale * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = pSparseData[pos1] * (NNFloat)(1.0 / 256.0); if (a < cData._SMCE_oneTarget) error += -w * t * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat *pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; NNFloat error = (NNFloat)0.0; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._SMCE_oneScale * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = pSparseData[pos1] * (NNFloat)(1.0 / 128.0); if (a < cData._SMCE_oneTarget) error += -w * t * log(max(MIN_ERROR, a)); pos1 += cData._warpSize; } } REDUCEERROR(error) } template<typename T> NNFloat kCalculateIndexedSparseAnalogMultinomialScaledMarginalCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); kCalculateIndexedSparseAnalogMultinomialScaledMarginalCrossEntropyError_kernel<<<blocks, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateIndexedSparseAnalogMultinomialScaledMarginalCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); //printf("Error is %f\n", (double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateL1Error_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; T t = pData[dOffset + pos]; error = fabsf(a - t); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateL1Error_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); error = fabsf(a - t); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateL1Error_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); error = fabsf(a - t); } REDUCEERROR(error) } template<typename T> NNFloat kCalculateL1Error(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, T* pData) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); kCalculateL1Error_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, stride, pUnit, pData); LAUNCHERROR("kCalculateL1Error_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedL1Error_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; T t = pData[dOffset + pos]; error = fabsf(a - t); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedL1Error_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); error = fabsf(a - t); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedL1Error_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); error = fabsf(a - t); } REDUCEERROR(error) } template<typename T> NNFloat kCalculateIndexedL1Error(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, T* pData) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); kCalculateIndexedL1Error_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, stride, pUnit, pIndex, pData); LAUNCHERROR("kCalculateL1Error_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateL2Error_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; T t = pData[dOffset + pos]; error = (NNFloat)0.5 * (a - t) * (a - t); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateL2Error_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); error = (NNFloat)0.5 * (a - t) * (a - t); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateL2Error_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); error = (NNFloat)0.5 * (a - t) * (a - t); } REDUCEERROR(error) } template<typename T> NNFloat kCalculateL2Error(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, T* pData) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); kCalculateL2Error_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, stride, pUnit, pData); LAUNCHERROR("kCalculateL2Error_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedL2Error_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; T t = pData[dOffset + pos]; error = (NNFloat)0.5 * (a - t) * (a - t); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedL2Error_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); error = (NNFloat)0.5 * (a - t) * (a - t); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedL2Error_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); error = (NNFloat)0.5 * (a - t) * (a - t); } REDUCEERROR(error) } template<typename T> NNFloat kCalculateIndexedL2Error(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, T* pData) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); kCalculateIndexedL2Error_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, stride, pUnit, pIndex, pData); LAUNCHERROR("kCalculateIndexedL2Error_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateL2HingeError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; T t = pData[dOffset + pos]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f , diff) : max((NNFloat)0.0, diff); error += (NNFloat)0.5 * diff * diff; } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateL2HingeError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - t; diff = (t > (NNFloat)0.0) ? 
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); error = (NNFloat)0.5 * diff * diff; } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateL2HingeError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f , diff) : max((NNFloat)0.0, diff); error += (NNFloat)0.5 * diff * diff; } REDUCEERROR(error) } template<typename T> NNFloat kCalculateL2HingeError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, T* pData) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); kCalculateL2HingeError_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, stride, pUnit, pData); LAUNCHERROR("kCalculateL2HingeError_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedL2HingeError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; T t = pData[dOffset + pos]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f , diff) : max((NNFloat)0.0, diff); error += (NNFloat)0.5 * diff * diff; } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedL2HingeError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - t; diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); error = (NNFloat)0.5 * diff * diff; } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedL2HingeError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? 
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff);
        error += (NNFloat)0.5 * diff * diff;
    }
    REDUCEERROR(error)
}

template<typename T> NNFloat kCalculateIndexedL2HingeError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, T* pData)
{
    cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t));
    dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock);
    kCalculateIndexedL2HingeError_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, stride, pUnit, pIndex, pData);
    LAUNCHERROR("kCalculateIndexedL2HingeError_kernel");
    getGpu()._pbAccumulator->Download();
    return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE);
}

template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateHingeError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, T* pData)
{
    // Calculate initial offsets
    pUnit += blockIdx.x * stride;
    pData += (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;

    // Calculate loss
    uint32_t pos = threadIdx.x;
    NNFloat loss = (NNFloat)0.0;
    while (pos < stride)
    {
        NNFloat t = pData[pos];
        NNFloat y = pUnit[pos];
        loss += max((NNFloat)0.0, (NNFloat)1.0 - t * y);
        pos += blockDim.x;
        //printf("HL %d %f %f %f\n", blockIdx.x, t, y, loss);
    }
    REDUCEERROR(loss)
}

template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateHingeError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, unsigned char* pData)
{
    // Calculate initial offsets
    pUnit += blockIdx.x * stride;
    pData += (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;

    // Calculate loss
    uint32_t pos = threadIdx.x;
    NNFloat loss = (NNFloat)0.0;
    while (pos < stride)
    {
        NNFloat t = pData[pos] * (NNFloat)(1.0 / 256.0);
        NNFloat y = pUnit[pos];
        loss += max((NNFloat)0.0, (NNFloat)1.0 - t * y);
        pos += blockDim.x;
    }
    REDUCEERROR(loss)
}

template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateHingeError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, char* pData)
{
    // Calculate initial offsets
    pUnit += blockIdx.x * stride;
    pData += (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;

    // Calculate loss
    uint32_t pos = threadIdx.x;
    NNFloat loss = (NNFloat)0.0;
    while (pos < stride)
    {
        NNFloat t = (NNFloat)pData[pos] * (NNFloat)(1.0 / 128.0);
        NNFloat y = pUnit[pos];
        loss += max((NNFloat)0.0, (NNFloat)1.0 - t * y);
        pos += blockDim.x;
    }
    REDUCEERROR(loss)
}

template<typename T> NNFloat kCalculateHingeError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, T* pData)
{
    cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t));
    unsigned long threads = max(32, min(stride, 128));
    kCalculateHingeError_kernel<<<batch, threads>>>(position, stride, pUnit, pData);
    LAUNCHERROR("kCalculateHingeError_kernel");
    getGpu()._pbAccumulator->Download();
    return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE);
}

template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateIndexedHingeError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, T* pData)
{
    // Calculate initial offsets
    pUnit += blockIdx.x * stride;
    pData += pIndex[cData._bShuffleIndices ?
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; // Calculate loss uint32_t pos = threadIdx.x; NNFloat loss = (NNFloat)0.0; while (pos < stride) { NNFloat t = pData[pos]; NNFloat y = pUnit[pos]; loss += max((NNFloat)0.0, (NNFloat)1.0 - t * y); pos += blockDim.x; } REDUCEERROR(loss) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedHingeError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, unsigned char* pData) { // Calculate initial offsets pUnit += blockIdx.x * stride; pData += pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; // Calculate loss uint32_t pos = threadIdx.x; NNFloat loss = (NNFloat)0.0; while (pos < stride) { NNFloat t = pData[pos] * (NNFloat)(1.0 / 256.0); NNFloat y = pUnit[pos]; loss += max((NNFloat)0.0, (NNFloat)1.0 - t * y); pos += blockDim.x; } REDUCEERROR(loss) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedHingeError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, char* pData) { // Calculate initial offsets pUnit += blockIdx.x * stride; pData += pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; // Calculate loss uint32_t pos = threadIdx.x; NNFloat loss = (NNFloat)0.0; while (pos < stride) { NNFloat t = (NNFloat)pData[pos] * (NNFloat)(1.0 / 128.0); NNFloat y = pUnit[pos]; loss += max((NNFloat)0.0, (NNFloat)1.0 - t * y); pos += blockDim.x; } REDUCEERROR(loss) } template<typename T> NNFloat kCalculateIndexedHingeError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, T* pData) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); unsigned long threads = max(32, min(stride, 128)); kCalculateIndexedHingeError_kernel<<<batch, threads>>>(position, stride, pUnit, pIndex, pData); LAUNCHERROR("kCalculateIndexedHingeError_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; T t = pData[dOffset + pos]; error = -t * log(max(MIN_ERROR, a)) - ( (NNFloat)1.0 - t) * log(max(MIN_ERROR, (NNFloat)1.0 - a)); //printf("%d %llu %f %f %f\n", position, pos, a, t, error); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); error = -t * log(max(MIN_ERROR, a)) - ( (NNFloat)1.0 - t) * log(max(MIN_ERROR, (NNFloat)1.0 - a)); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); error = -t * log(max(MIN_ERROR, a)) - ( (NNFloat)1.0 - t) * log(max(MIN_ERROR, (NNFloat)1.0 - a)); //printf("%d %llu %f %f %f\n", position, pos, a, t, error); } REDUCEERROR(error) } template<typename T> NNFloat kCalculateCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, T* pData) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); kCalculateCrossEntropyError_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, stride, pUnit, pData); LAUNCHERROR("kCalculateCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; T t = pData[dOffset + pos]; error = -t * log(max(MIN_ERROR, a)) - ( (NNFloat)1.0 - t) * log(max(MIN_ERROR, (NNFloat)1.0 - a)); //printf("%d %llu %f %f %f\n", position, pos, a, t, error); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); error = -t * log(max(MIN_ERROR, a)) - ( (NNFloat)1.0 - t) * log(max(MIN_ERROR, (NNFloat)1.0 - a)); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); error = -t * log(max(MIN_ERROR, a)) - ( (NNFloat)1.0 - t) * log(max(MIN_ERROR, (NNFloat)1.0 - a)); //printf("%d %llu %f %f %f\n", position, pos, a, t, error); } REDUCEERROR(error) } template<typename T> NNFloat kCalculateIndexedCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, T* pData) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); kCalculateIndexedCrossEntropyError_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, stride, pUnit, pIndex, pData); LAUNCHERROR("kCalculateIndexedCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; T t = pData[dOffset + pos]; error = -t * log(max(MIN_ERROR, a)); //printf("%d %llu %f %f %f\n", position, pos, a, t, error); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); error = -t * log(max(MIN_ERROR, a)); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); error = -t * log(max(MIN_ERROR, a)); //printf("%d %llu %f %f %f\n", position, pos, a, t, error); } REDUCEERROR(error) } template<typename T> NNFloat kCalculateMultinomialCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, T* pData) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); kCalculateMultinomialCrossEntropyError_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, stride, pUnit, pData); LAUNCHERROR("kCalculateMultinomialCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; T t = pData[dOffset + pos]; error = -t * log(max(MIN_ERROR, a)); //printf("%d %llu %f %f %f\n", position, pos, a, t, error); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); error = -t * log(max(MIN_ERROR, a)); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedMultinomialCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); error = -t * log(max(MIN_ERROR, a)); //printf("%d %llu %f %f %f\n", position, pos, a, t, error); } REDUCEERROR(error) } template<typename T> NNFloat kCalculateIndexedMultinomialCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, T* pData) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); kCalculateIndexedMultinomialCrossEntropyError_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, stride, pUnit, pIndex, pData); LAUNCHERROR("kCalculateIndexedMultinomialCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } // HERE 2 template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; T t = pData[dOffset + pos]; if (((t == (T)1.0) && (a < cData._SMCE_oneTarget)) || ((t == (T)0.0) && (a > cData._SMCE_zeroTarget))) error = -t * cData._SMCE_oneScale * log(max(MIN_ERROR, a)) - ( (NNFloat)1.0 - t) * cData._SMCE_zeroScale * log(max(MIN_ERROR, (NNFloat)1.0 - a)); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); if (((t == (NNFloat)1.0) && (a < cData._SMCE_oneTarget)) || ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))) error = -t * cData._SMCE_oneScale * log(max(MIN_ERROR, a)) - ((NNFloat)1.0 - t) * cData._SMCE_zeroScale * log(max(MIN_ERROR, (NNFloat)1.0 - a)); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); if (((t == (NNFloat)1.0) && (a < cData._SMCE_oneTarget)) || ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))) error = -t * cData._SMCE_oneScale * log(max(MIN_ERROR, a)) - ((NNFloat)1.0 - t) * cData._SMCE_zeroScale * log(max(MIN_ERROR, (NNFloat)1.0 - a)); //printf("%d %llu %f %f %f\n", position, pos, a, t, error); } REDUCEERROR(error) } template<typename T> NNFloat kCalculateScaledMarginalCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, T* pData) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); kCalculateScaledMarginalCrossEntropyError_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, stride, pUnit, pData); LAUNCHERROR("kCalculateScaledMarginalCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; T t = pData[dOffset + pos]; if (((t == (T)1.0) && (a < cData._SMCE_oneTarget)) || ((t == (T)0.0) && (a > cData._SMCE_zeroTarget))) error = -t * cData._SMCE_oneScale * log(max(MIN_ERROR, a)) - ( (NNFloat)1.0 - t) * cData._SMCE_zeroScale * log(max(MIN_ERROR, (NNFloat)1.0 - a)); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); if (((t == (NNFloat)1.0) && (a < cData._SMCE_oneTarget)) || ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))) error = -t * cData._SMCE_oneScale * log(max(MIN_ERROR, a)) - ((NNFloat)1.0 - t) * cData._SMCE_zeroScale * log(max(MIN_ERROR, (NNFloat)1.0 - a)); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); if (((t == (NNFloat)1.0) && (a < cData._SMCE_oneTarget)) || ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))) error = -t * cData._SMCE_oneScale * log(max(MIN_ERROR, a)) - ((NNFloat)1.0 - t) * cData._SMCE_zeroScale * log(max(MIN_ERROR, (NNFloat)1.0 - a)); //printf("%d %llu %f %f %f\n", position, pos, a, t, error); } REDUCEERROR(error) } template<typename T> NNFloat kCalculateIndexedScaledMarginalCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, T* pData) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); kCalculateIndexedScaledMarginalCrossEntropyError_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, stride, pUnit, pIndex, pData); LAUNCHERROR("kCalculateIndexedScaledMarginalCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; T t = pData[dOffset + pos]; if ((t != (T)0.0) && (a < cData._SMCE_oneTarget)) error = -t * cData._SMCE_oneScale * log(max(MIN_ERROR, a)); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); if ((t != (NNFloat)0.0) && (a < cData._SMCE_oneTarget)) error = -t * cData._SMCE_oneScale * log(max(MIN_ERROR, a)); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); if ((t != (NNFloat)0.0) && (a < cData._SMCE_oneTarget)) error = -t * cData._SMCE_oneScale * log(max(MIN_ERROR, a)); } REDUCEERROR(error) } template<typename T> NNFloat kCalculateMultinomialScaledMarginalCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, T* pData) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); kCalculateMultinomialScaledMarginalCrossEntropyError_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, stride, pUnit, pData); LAUNCHERROR("kCalculateMultinomialScaledMarginalCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; T t = pData[dOffset + pos]; if ((t != (T)0.0) && (a < cData._SMCE_oneTarget)) error = -t * cData._SMCE_oneScale * log(max(MIN_ERROR, a)); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); if ((t != (NNFloat)0.0) && (a < cData._SMCE_oneTarget)) error = -t * cData._SMCE_oneScale * log(max(MIN_ERROR, a)); } REDUCEERROR(error) } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedMultinomialScaledMarginalCrossEntropyError_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; NNFloat error = (NNFloat)0.0; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); if ((t != (NNFloat)0.0) && (a < cData._SMCE_oneTarget)) error = -t * cData._SMCE_oneScale * log(max(MIN_ERROR, a)); } REDUCEERROR(error) } template<typename T> NNFloat kCalculateIndexedMultinomialScaledMarginalCrossEntropyError(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint32_t* pIndex, T* pData) { cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); kCalculateIndexedMultinomialScaledMarginalCrossEntropyError_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, stride, pUnit, pIndex, pData); LAUNCHERROR("kCalculateIndexedMultinomialScaledMarginalCrossEntropyError_kernel"); getGpu()._pbAccumulator->Download(); return (NNFloat)((double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); } // Instantiates allowable templated functions so we can hide the implementations here // instead of in the header file because we're mixing CUDA and C++ and that's // a migraine headache in the making otherwise. #define EXPLICITLY_INSTANTIATE_KERNELS(T) \ template NNFloat kCalculateL1Error<T>(uint32_t, uint32_t, uint32_t, NNFloat*, T*); \ template NNFloat kCalculateIndexedL1Error<T>(uint32_t, uint32_t, uint32_t, NNFloat*, uint32_t*, T*); \ template NNFloat kCalculateL2Error<T>(uint32_t, uint32_t, uint32_t, NNFloat*, T*); \ template NNFloat kCalculateIndexedL2Error<T>(uint32_t, uint32_t, uint32_t, NNFloat*, uint32_t*, T*); \ template NNFloat kCalculateL2HingeError<T>(uint32_t, uint32_t, uint32_t, NNFloat*, T*); \ template NNFloat kCalculateIndexedL2HingeError<T>(uint32_t, uint32_t, uint32_t, NNFloat*, uint32_t*, T*); \ template NNFloat kCalculateCrossEntropyError<T>(uint32_t, uint32_t, uint32_t, NNFloat*, T*); \ template NNFloat kCalculateIndexedCrossEntropyError<T>(uint32_t, uint32_t, uint32_t, NNFloat*, uint32_t*, T*); \ template NNFloat kCalculateScaledMarginalCrossEntropyError<T>(uint32_t, uint32_t, uint32_t, NNFloat*, T*); \ template NNFloat kCalculateIndexedScaledMarginalCrossEntropyError<T>(uint32_t, uint32_t, uint32_t, NNFloat*, uint32_t*, T*); \ template NNFloat kCalculateMultinomialCrossEntropyError<T>(uint32_t, uint32_t, uint32_t, NNFloat*, T*); \ template NNFloat kCalculateIndexedMultinomialCrossEntropyError<T>(uint32_t, uint32_t, uint32_t, NNFloat*, uint32_t*, T*); \ template NNFloat kCalculateMultinomialScaledMarginalCrossEntropyError<T>(uint32_t, uint32_t, uint32_t, NNFloat*, T*); \ template NNFloat kCalculateIndexedMultinomialScaledMarginalCrossEntropyError<T>(uint32_t, uint32_t, uint32_t, NNFloat*, uint32_t*, T*); \ template NNFloat kCalculateHingeError<T>(uint32_t, uint32_t, uint32_t, NNFloat*, T*); \ template NNFloat kCalculateIndexedHingeError<T>(uint32_t, uint32_t, uint32_t, NNFloat*, uint32_t*, T*); \ template NNFloat kCalculateSparseAnalogL1Error<T>(uint32_t, uint32_t, uint32_t, NNFloat*, uint64_t*, uint64_t*, uint32_t*, NNFloat* pSparseWeight, T*, bool); \ template NNFloat kCalculateIndexedSparseAnalogL1Error<T>(uint32_t, uint32_t, uint32_t, NNFloat*, uint32_t*, uint64_t*, uint64_t*, uint32_t*, NNFloat* pSparseWeight, T*, bool); \ template NNFloat kCalculateSparseAnalogL2Error<T>(uint32_t, uint32_t, uint32_t, NNFloat*, uint64_t*, uint64_t*, uint32_t*, NNFloat* pSparseWeight, T*, bool); \ template NNFloat kCalculateIndexedSparseAnalogL2Error<T>(uint32_t, uint32_t, uint32_t, 
NNFloat*, uint32_t*, uint64_t*, uint64_t*, uint32_t*, NNFloat* pSparseWeight, T*, bool); \ template NNFloat kCalculateSparseAnalogL2HingeError<T>(uint32_t, uint32_t, uint32_t, NNFloat*, uint64_t*, uint64_t*, uint32_t*, NNFloat* pSparseWeight, T*, bool); \ template NNFloat kCalculateIndexedSparseAnalogL2HingeError<T>(uint32_t, uint32_t, uint32_t, NNFloat*, uint32_t*, uint64_t*, uint64_t*, uint32_t*, NNFloat* pSparseWeight, T*, bool); \ template NNFloat kCalculateSparseAnalogMultinomialCrossEntropyError<T>(uint32_t, uint32_t, uint32_t, NNFloat*, uint64_t*, uint64_t*, uint32_t*, NNFloat* pSparseWeight, T*); \ template NNFloat kCalculateIndexedSparseAnalogMultinomialCrossEntropyError<T>(uint32_t, uint32_t, uint32_t, NNFloat*, uint32_t*, uint64_t*, uint64_t*, uint32_t*, NNFloat* pSparseWeight, T*); \ template NNFloat kCalculateSparseAnalogMultinomialScaledMarginalCrossEntropyError<T>(uint32_t, uint32_t, uint32_t, NNFloat*, uint64_t*, uint64_t*, uint32_t*, NNFloat* pSparseWeight, T*); \ template NNFloat kCalculateIndexedSparseAnalogMultinomialScaledMarginalCrossEntropyError<T>(uint32_t, uint32_t, uint32_t, NNFloat*, uint32_t*, uint64_t*, uint64_t*, uint32_t*, NNFloat* pSparseWeight, T*); \ template NNFloat kCalculateSparseDataScaledMarginalCrossEntropyError<T>(uint32_t, uint32_t, uint32_t, NNFloat*, uint64_t*, uint64_t*, uint32_t*, T*, bool); \ template NNFloat kCalculateIndexedSparseDataScaledMarginalCrossEntropyError<T>(uint32_t, uint32_t, uint32_t, NNFloat*, uint32_t*, uint64_t*, uint64_t*, uint32_t*, T*, bool); \ /**/ EXPLICITLY_INSTANTIATE_KERNELS(NNFloat) EXPLICITLY_INSTANTIATE_KERNELS(double) EXPLICITLY_INSTANTIATE_KERNELS(unsigned char) EXPLICITLY_INSTANTIATE_KERNELS(char) EXPLICITLY_INSTANTIATE_KERNELS(uint32_t) EXPLICITLY_INSTANTIATE_KERNELS(uint64_t) EXPLICITLY_INSTANTIATE_KERNELS(int32_t) EXPLICITLY_INSTANTIATE_KERNELS(int64_t)
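// A hedged usage sketch (illustrative only; the buffer names below are assumptions, not part
// of this file): host code includes the project's kernel declarations and calls the templated
// entry points directly, e.g.
//
//     NNFloat err = kCalculateL1Error(position, batch, stride,
//                                     pbUnit->_pDevData,     // NNFloat* layer outputs on the device
//                                     pbData->_pDevData);    // T* target data on the device
//
// Because only the element types listed in EXPLICITLY_INSTANTIATE_KERNELS above are ever
// instantiated, calling these templates with any other T compiles at the call site but fails
// at link time with an unresolved-symbol error.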
ba6b1da85c8fbb176c885aec286d029524201190.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
 *
 * Please refer to the NVIDIA end user license agreement (EULA) associated
 * with this source code for terms and conditions that govern your use of
 * this software. Any use, reproduction, disclosure, or distribution of
 * this software and related documentation outside the terms of the EULA
 * is strictly prohibited.
 *
 */

// System includes
#include <stdio.h>
#include <assert.h>

// CUDA runtime
#include <hip/hip_runtime.h>

// helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>

#ifndef MAX
#define MAX(a,b) (a > b ? a : b)
#endif

__global__ void testKernel(int val)
{
    printf("[%d, %d]:\t\tValue is:%d\n", \
           blockIdx.y*gridDim.x + blockIdx.x, \
           threadIdx.z*blockDim.x*blockDim.y + threadIdx.y*blockDim.x + threadIdx.x, \
           val);
}

int main(int argc, char **argv)
{
    int devID;
    hipDeviceProp_t props;

    // This will pick the best possible CUDA capable device
    devID = findCudaDevice(argc, (const char **)argv);

    //Get GPU information
    checkCudaErrors(hipGetDevice(&devID));
    checkCudaErrors(hipGetDeviceProperties(&props, devID));
    printf("Device %d: \"%s\" with Compute %d.%d capability\n",
           devID, props.name, props.major, props.minor);

    printf("printf() is called. Output:\n\n");

    //Kernel configuration, where a two-dimensional grid and
    //three-dimensional blocks are configured.
    dim3 dimGrid(2, 2);
    dim3 dimBlock(2, 2, 2);
    testKernel<<<dimGrid, dimBlock>>>(10);
    hipDeviceSynchronize();

    return EXIT_SUCCESS;
}
ba6b1da85c8fbb176c885aec286d029524201190.cu
/* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // System includes #include <stdio.h> #include <assert.h> // CUDA runtime #include <cuda_runtime.h> // helper functions and utilities to work with CUDA #include <helper_functions.h> #include <helper_cuda.h> #ifndef MAX #define MAX(a,b) (a > b ? a : b) #endif __global__ void testKernel(int val) { printf("[%d, %d]:\t\tValue is:%d\n", \ blockIdx.y*gridDim.x + blockIdx.x, \ threadIdx.z*blockDim.x*blockDim.y + threadIdx.y*blockDim.x + threadIdx.x, \ val); } int main(int argc, char **argv) { int devID; cudaDeviceProp props; // This will pick the best possible CUDA capable device devID = findCudaDevice(argc, (const char **)argv); //Get GPU information checkCudaErrors(cudaGetDevice(&devID)); checkCudaErrors(cudaGetDeviceProperties(&props, devID)); printf("Device %d: \"%s\" with Compute %d.%d capability\n", devID, props.name, props.major, props.minor); printf("printf() is called. Output:\n\n"); //Kernel configuration, where a two-dimensional grid and //three-dimensional blocks are configured. dim3 dimGrid(2, 2); dim3 dimBlock(2, 2, 2); testKernel << <dimGrid, dimBlock >> > (10); cudaDeviceSynchronize(); return EXIT_SUCCESS; }
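// For reference, the two indices printed by testKernel are the usual row-major
// flattenings of the launch configuration: the block id is
// blockIdx.y * gridDim.x + blockIdx.x, and the thread id is
// threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x.
// With dimGrid(2, 2) and dimBlock(2, 2, 2) that is 4 blocks * 8 threads, so the
// kernel prints 32 lines, each ending in the value 10 passed at launch.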
8e84bd5d38489ea45e6247089bb820ff9d4c7f78.hip
// !!! This is a file automatically generated by hipify!!! #include <rocblas.h> extern "C"{ double *DGEMM(double *A, double *B, int rowA, int colA, int rowB, int colB){ double *devA, *devB, *devC, *hostC; hipMalloc((void **) &devA, sizeof(double) * (rowA * colA)); hipMalloc((void **) &devB, sizeof(double) * (rowB * colB)); hipMalloc((void **) &devC, sizeof(double) * (rowA * colB)); hipHostMalloc((void **) &hostC, sizeof(double) * (rowA * colB)); hipMemcpy(devA, A, sizeof(double) * (rowA * colA), hipMemcpyHostToDevice); hipMemcpy(devB, B, sizeof(double) * (rowB * colB), hipMemcpyHostToDevice); hipblasHandle_t handle; hipblasStatus_t status = hipblasCreate(&handle); double alpha = 1.0f;double beta = 0.0f; status = hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, rowA, colB,\ colA, &alpha, devA, colA, devB, colB, &beta, devC, rowA); status = hipblasDestroy(handle); hipMemcpy(hostC, devC, sizeof(double) * (rowA * colB), hipMemcpyDeviceToHost); hipFree(devA); hipFree(devB); hipFree(devC); return hostC; } }
8e84bd5d38489ea45e6247089bb820ff9d4c7f78.cu
#include <cublas_v2.h> extern "C"{ double *DGEMM(double *A, double *B, int rowA, int colA, int rowB, int colB){ double *devA, *devB, *devC, *hostC; cudaMalloc((void **) &devA, sizeof(double) * (rowA * colA)); cudaMalloc((void **) &devB, sizeof(double) * (rowB * colB)); cudaMalloc((void **) &devC, sizeof(double) * (rowA * colB)); cudaMallocHost((void **) &hostC, sizeof(double) * (rowA * colB)); cudaMemcpy(devA, A, sizeof(double) * (rowA * colA), cudaMemcpyHostToDevice); cudaMemcpy(devB, B, sizeof(double) * (rowB * colB), cudaMemcpyHostToDevice); cublasHandle_t handle; cublasStatus_t status = cublasCreate(&handle); double alpha = 1.0f;double beta = 0.0f; status = cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, rowA, colB,\ colA, &alpha, devA, colA, devB, colB, &beta, devC, rowA); status = cublasDestroy(handle); cudaMemcpy(hostC, devC, sizeof(double) * (rowA * colB), cudaMemcpyDeviceToHost); cudaFree(devA); cudaFree(devB); cudaFree(devC); return hostC; } }
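// Two usage notes on the DGEMM wrapper above, stated as assumptions because the
// original file does not document its data layout: cuBLAS stores matrices
// column-major, so callers holding row-major C arrays commonly obtain C = A*B by
// requesting C^T = B^T * A^T, e.g.
//
//     cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
//                 colB, rowA, colA,               // m, n, k of the transposed product
//                 &alpha, devB, colB, devA, colA,
//                 &beta,  devC, colB);
//
// Also, the result buffer returned by DGEMM is pinned memory from cudaMallocHost,
// so the caller is expected to release it with cudaFreeHost (hipHostFree in the
// HIP variant) rather than free().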
2f91ed81ae62a56f8fbe2ffbdcb0945c1023f258.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <thrust/count.h> #include <thrust/device_vector.h> #include "test_utils.h" #include <raft/core/cudart_utils.hpp> #include <raft/core/interruptible.hpp> #include <raft/cuda_utils.cuh> #include <random/make_arima.cuh> namespace MLCommon { namespace Random { /* This test only proves that the generator runs without errors, not * correctness! */ struct MakeArimaInputs { int batch_size, n_obs; int p, d, q, P, D, Q, s, k; raft::random::GeneratorType gtype; uint64_t seed; }; template <typename T> class MakeArimaTest : public ::testing::TestWithParam<MakeArimaInputs> { protected: MakeArimaTest() : data(0, stream) {} void SetUp() override { params = ::testing::TestWithParam<MakeArimaInputs>::GetParam(); // Scales of the different random components T scale = 1.0, noise_scale = 0.2; T intercept_scale = params.d + params.D == 0 ? 1.0 : (params.d + params.D == 1 ? 0.2 : 0.01); ML::ARIMAOrder order = { params.p, params.d, params.q, params.P, params.D, params.Q, params.s, params.k}; RAFT_CUDA_TRY(hipStreamCreate(&stream)); data.resize(params.batch_size * params.n_obs, stream); // Create the time series dataset make_arima(data.data(), params.batch_size, params.n_obs, order, stream, scale, noise_scale, intercept_scale, params.seed, params.gtype); } void TearDown() override { RAFT_CUDA_TRY(hipStreamDestroy(stream)); } protected: MakeArimaInputs params; rmm::device_uvector<T> data; hipStream_t stream = 0; }; const std::vector<MakeArimaInputs> make_arima_inputs = { {100, 200, 1, 1, 2, 0, 0, 0, 0, 1, raft::random::GenPhilox, 1234ULL}, {1000, 100, 3, 0, 0, 1, 1, 0, 4, 1, raft::random::GenPhilox, 1234ULL}, {10000, 150, 2, 1, 2, 0, 1, 2, 4, 0, raft::random::GenPhilox, 1234ULL}}; typedef MakeArimaTest<float> MakeArimaTestF; TEST_P(MakeArimaTestF, Result) { raft::interruptible::synchronize(stream); } INSTANTIATE_TEST_CASE_P(MakeArimaTests, MakeArimaTestF, ::testing::ValuesIn(make_arima_inputs)); typedef MakeArimaTest<double> MakeArimaTestD; TEST_P(MakeArimaTestD, Result) { raft::interruptible::synchronize(stream); } INSTANTIATE_TEST_CASE_P(MakeArimaTests, MakeArimaTestD, ::testing::ValuesIn(make_arima_inputs)); } // end namespace Random } // end namespace MLCommon
2f91ed81ae62a56f8fbe2ffbdcb0945c1023f258.cu
/* * Copyright (c) 2019-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <thrust/count.h> #include <thrust/device_vector.h> #include "test_utils.h" #include <raft/core/cudart_utils.hpp> #include <raft/core/interruptible.hpp> #include <raft/cuda_utils.cuh> #include <random/make_arima.cuh> namespace MLCommon { namespace Random { /* This test only proves that the generator runs without errors, not * correctness! */ struct MakeArimaInputs { int batch_size, n_obs; int p, d, q, P, D, Q, s, k; raft::random::GeneratorType gtype; uint64_t seed; }; template <typename T> class MakeArimaTest : public ::testing::TestWithParam<MakeArimaInputs> { protected: MakeArimaTest() : data(0, stream) {} void SetUp() override { params = ::testing::TestWithParam<MakeArimaInputs>::GetParam(); // Scales of the different random components T scale = 1.0, noise_scale = 0.2; T intercept_scale = params.d + params.D == 0 ? 1.0 : (params.d + params.D == 1 ? 0.2 : 0.01); ML::ARIMAOrder order = { params.p, params.d, params.q, params.P, params.D, params.Q, params.s, params.k}; RAFT_CUDA_TRY(cudaStreamCreate(&stream)); data.resize(params.batch_size * params.n_obs, stream); // Create the time series dataset make_arima(data.data(), params.batch_size, params.n_obs, order, stream, scale, noise_scale, intercept_scale, params.seed, params.gtype); } void TearDown() override { RAFT_CUDA_TRY(cudaStreamDestroy(stream)); } protected: MakeArimaInputs params; rmm::device_uvector<T> data; cudaStream_t stream = 0; }; const std::vector<MakeArimaInputs> make_arima_inputs = { {100, 200, 1, 1, 2, 0, 0, 0, 0, 1, raft::random::GenPhilox, 1234ULL}, {1000, 100, 3, 0, 0, 1, 1, 0, 4, 1, raft::random::GenPhilox, 1234ULL}, {10000, 150, 2, 1, 2, 0, 1, 2, 4, 0, raft::random::GenPhilox, 1234ULL}}; typedef MakeArimaTest<float> MakeArimaTestF; TEST_P(MakeArimaTestF, Result) { raft::interruptible::synchronize(stream); } INSTANTIATE_TEST_CASE_P(MakeArimaTests, MakeArimaTestF, ::testing::ValuesIn(make_arima_inputs)); typedef MakeArimaTest<double> MakeArimaTestD; TEST_P(MakeArimaTestD, Result) { raft::interruptible::synchronize(stream); } INSTANTIATE_TEST_CASE_P(MakeArimaTests, MakeArimaTestD, ::testing::ValuesIn(make_arima_inputs)); } // end namespace Random } // end namespace MLCommon
8225ae56d2eb0b78936ecfc8b2adeb5888995769.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "srad.h" #include <stdio.h> __global__ void srad( float *E_C, float *W_C, float *N_C, float *S_C, float *J_cuda, float *C_cuda, int cols, int rows, float q0sqr ) { //block id int bx = blockIdx.x; int by = blockIdx.y; //thread id int tx = threadIdx.x; int ty = threadIdx.y; //indices int index = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + tx; int index_n = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + tx - cols; int index_s = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * BLOCK_SIZE + tx; int index_w = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty - 1; int index_e = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + BLOCK_SIZE; float n, w, e, s, jc, g2, l, num, den, qsqr, c; //shared memory allocation __shared__ float temp[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float temp_result[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float north[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float south[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float east[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float west[BLOCK_SIZE][BLOCK_SIZE]; //load data to shared memory north[ty][tx] = J_cuda[index_n]; south[ty][tx] = J_cuda[index_s]; if ( by == 0 ){ north[ty][tx] = J_cuda[BLOCK_SIZE * bx + tx]; } else if ( by == gridDim.y - 1 ){ south[ty][tx] = J_cuda[cols * BLOCK_SIZE * (gridDim.y - 1) + BLOCK_SIZE * bx + cols * ( BLOCK_SIZE - 1 ) + tx]; } __syncthreads(); west[ty][tx] = J_cuda[index_w]; east[ty][tx] = J_cuda[index_e]; if ( bx == 0 ){ west[ty][tx] = J_cuda[cols * BLOCK_SIZE * by + cols * ty]; } else if ( bx == gridDim.x - 1 ){ east[ty][tx] = J_cuda[cols * BLOCK_SIZE * by + BLOCK_SIZE * ( gridDim.x - 1) + cols * ty + BLOCK_SIZE-1]; } __syncthreads(); temp[ty][tx] = J_cuda[index]; __syncthreads(); jc = temp[ty][tx]; if ( ty == 0 && tx == 0 ){ //nw n = north[ty][tx] - jc; s = temp[ty+1][tx] - jc; w = west[ty][tx] - jc; e = temp[ty][tx+1] - jc; } else if ( ty == 0 && tx == BLOCK_SIZE-1 ){ //ne n = north[ty][tx] - jc; s = temp[ty+1][tx] - jc; w = temp[ty][tx-1] - jc; e = east[ty][tx] - jc; } else if ( ty == BLOCK_SIZE -1 && tx == BLOCK_SIZE - 1){ //se n = temp[ty-1][tx] - jc; s = south[ty][tx] - jc; w = temp[ty][tx-1] - jc; e = east[ty][tx] - jc; } else if ( ty == BLOCK_SIZE -1 && tx == 0 ){//sw n = temp[ty-1][tx] - jc; s = south[ty][tx] - jc; w = west[ty][tx] - jc; e = temp[ty][tx+1] - jc; } else if ( ty == 0 ){ //n n = north[ty][tx] - jc; s = temp[ty+1][tx] - jc; w = temp[ty][tx-1] - jc; e = temp[ty][tx+1] - jc; } else if ( tx == BLOCK_SIZE -1 ){ //e n = temp[ty-1][tx] - jc; s = temp[ty+1][tx] - jc; w = temp[ty][tx-1] - jc; e = east[ty][tx] - jc; } else if ( ty == BLOCK_SIZE -1){ //s n = temp[ty-1][tx] - jc; s = south[ty][tx] - jc; w = temp[ty][tx-1] - jc; e = temp[ty][tx+1] - jc; } else if ( tx == 0 ){ //w n = temp[ty-1][tx] - jc; s = temp[ty+1][tx] - jc; w = west[ty][tx] - jc; e = temp[ty][tx+1] - jc; } else{ //the data elements which are not on the borders n = temp[ty-1][tx] - jc; s = temp[ty+1][tx] - jc; w = temp[ty][tx-1] - jc; e = temp[ty][tx+1] - jc; } g2 = ( n * n + s * s + w * w + e * e ) / (jc * jc); l = ( n + s + w + e ) / jc; num = (0.5*g2) - ((1.0/16.0)*(l*l)) ; den = 1 + (.25*l); qsqr = num/(den*den); // diffusion coefficent (equ 33) den = (qsqr-q0sqr) / (q0sqr * (1+q0sqr)) ; c = 1.0 / (1.0+den) ; // saturate diffusion coefficent if (c < 0){temp_result[ty][tx] = 0;} else if (c > 1) {temp_result[ty][tx] = 1;} else {temp_result[ty][tx] = c;} __syncthreads(); C_cuda[index] = temp_result[ty][tx]; E_C[index] = e; 
W_C[index] = w; S_C[index] = s; N_C[index] = n; } __global__ void srad2( float *E_C, float *W_C, float *N_C, float *S_C, float *J_cuda, float *C_cuda, int cols, int rows, float lambda, float q0sqr ) { //block id int bx = blockIdx.x; int by = blockIdx.y; //thread id int tx = threadIdx.x; int ty = threadIdx.y; //indices int index = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + tx; int index_s = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * BLOCK_SIZE + tx; int index_e = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + BLOCK_SIZE; float cc, cn, cs, ce, cw, d_sum; //shared memory allocation __shared__ float south_c[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float east_c[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float c_cuda_temp[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float c_cuda_result[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float temp[BLOCK_SIZE][BLOCK_SIZE]; //load data to shared memory temp[ty][tx] = J_cuda[index]; __syncthreads(); south_c[ty][tx] = C_cuda[index_s]; if ( by == gridDim.y - 1 ){ south_c[ty][tx] = C_cuda[cols * BLOCK_SIZE * (gridDim.y - 1) + BLOCK_SIZE * bx + cols * ( BLOCK_SIZE - 1 ) + tx]; } __syncthreads(); east_c[ty][tx] = C_cuda[index_e]; if ( bx == gridDim.x - 1 ){ east_c[ty][tx] = C_cuda[cols * BLOCK_SIZE * by + BLOCK_SIZE * ( gridDim.x - 1) + cols * ty + BLOCK_SIZE-1]; } __syncthreads(); c_cuda_temp[ty][tx] = C_cuda[index]; __syncthreads(); cc = c_cuda_temp[ty][tx]; if ( ty == BLOCK_SIZE -1 && tx == BLOCK_SIZE - 1){ //se cn = cc; cs = south_c[ty][tx]; cw = cc; ce = east_c[ty][tx]; } else if ( tx == BLOCK_SIZE -1 ){ //e cn = cc; cs = c_cuda_temp[ty+1][tx]; cw = cc; ce = east_c[ty][tx]; } else if ( ty == BLOCK_SIZE -1){ //s cn = cc; cs = south_c[ty][tx]; cw = cc; ce = c_cuda_temp[ty][tx+1]; } else{ //the data elements which are not on the borders cn = cc; cs = c_cuda_temp[ty+1][tx]; cw = cc; ce = c_cuda_temp[ty][tx+1]; } // divergence (equ 58) d_sum = cn * N_C[index] + cs * S_C[index] + cw * W_C[index] + ce * E_C[index]; // image update (equ 61) c_cuda_result[ty][tx] = temp[ty][tx] + 0.25 * lambda * d_sum; __syncthreads(); J_cuda[index] = c_cuda_result[ty][tx]; }
8225ae56d2eb0b78936ecfc8b2adeb5888995769.cu
#include "srad.h" #include <stdio.h> __global__ void srad( float *E_C, float *W_C, float *N_C, float *S_C, float *J_cuda, float *C_cuda, int cols, int rows, float q0sqr ) { //block id int bx = blockIdx.x; int by = blockIdx.y; //thread id int tx = threadIdx.x; int ty = threadIdx.y; //indices int index = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + tx; int index_n = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + tx - cols; int index_s = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * BLOCK_SIZE + tx; int index_w = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty - 1; int index_e = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + BLOCK_SIZE; float n, w, e, s, jc, g2, l, num, den, qsqr, c; //shared memory allocation __shared__ float temp[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float temp_result[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float north[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float south[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float east[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float west[BLOCK_SIZE][BLOCK_SIZE]; //load data to shared memory north[ty][tx] = J_cuda[index_n]; south[ty][tx] = J_cuda[index_s]; if ( by == 0 ){ north[ty][tx] = J_cuda[BLOCK_SIZE * bx + tx]; } else if ( by == gridDim.y - 1 ){ south[ty][tx] = J_cuda[cols * BLOCK_SIZE * (gridDim.y - 1) + BLOCK_SIZE * bx + cols * ( BLOCK_SIZE - 1 ) + tx]; } __syncthreads(); west[ty][tx] = J_cuda[index_w]; east[ty][tx] = J_cuda[index_e]; if ( bx == 0 ){ west[ty][tx] = J_cuda[cols * BLOCK_SIZE * by + cols * ty]; } else if ( bx == gridDim.x - 1 ){ east[ty][tx] = J_cuda[cols * BLOCK_SIZE * by + BLOCK_SIZE * ( gridDim.x - 1) + cols * ty + BLOCK_SIZE-1]; } __syncthreads(); temp[ty][tx] = J_cuda[index]; __syncthreads(); jc = temp[ty][tx]; if ( ty == 0 && tx == 0 ){ //nw n = north[ty][tx] - jc; s = temp[ty+1][tx] - jc; w = west[ty][tx] - jc; e = temp[ty][tx+1] - jc; } else if ( ty == 0 && tx == BLOCK_SIZE-1 ){ //ne n = north[ty][tx] - jc; s = temp[ty+1][tx] - jc; w = temp[ty][tx-1] - jc; e = east[ty][tx] - jc; } else if ( ty == BLOCK_SIZE -1 && tx == BLOCK_SIZE - 1){ //se n = temp[ty-1][tx] - jc; s = south[ty][tx] - jc; w = temp[ty][tx-1] - jc; e = east[ty][tx] - jc; } else if ( ty == BLOCK_SIZE -1 && tx == 0 ){//sw n = temp[ty-1][tx] - jc; s = south[ty][tx] - jc; w = west[ty][tx] - jc; e = temp[ty][tx+1] - jc; } else if ( ty == 0 ){ //n n = north[ty][tx] - jc; s = temp[ty+1][tx] - jc; w = temp[ty][tx-1] - jc; e = temp[ty][tx+1] - jc; } else if ( tx == BLOCK_SIZE -1 ){ //e n = temp[ty-1][tx] - jc; s = temp[ty+1][tx] - jc; w = temp[ty][tx-1] - jc; e = east[ty][tx] - jc; } else if ( ty == BLOCK_SIZE -1){ //s n = temp[ty-1][tx] - jc; s = south[ty][tx] - jc; w = temp[ty][tx-1] - jc; e = temp[ty][tx+1] - jc; } else if ( tx == 0 ){ //w n = temp[ty-1][tx] - jc; s = temp[ty+1][tx] - jc; w = west[ty][tx] - jc; e = temp[ty][tx+1] - jc; } else{ //the data elements which are not on the borders n = temp[ty-1][tx] - jc; s = temp[ty+1][tx] - jc; w = temp[ty][tx-1] - jc; e = temp[ty][tx+1] - jc; } g2 = ( n * n + s * s + w * w + e * e ) / (jc * jc); l = ( n + s + w + e ) / jc; num = (0.5*g2) - ((1.0/16.0)*(l*l)) ; den = 1 + (.25*l); qsqr = num/(den*den); // diffusion coefficent (equ 33) den = (qsqr-q0sqr) / (q0sqr * (1+q0sqr)) ; c = 1.0 / (1.0+den) ; // saturate diffusion coefficent if (c < 0){temp_result[ty][tx] = 0;} else if (c > 1) {temp_result[ty][tx] = 1;} else {temp_result[ty][tx] = c;} __syncthreads(); C_cuda[index] = temp_result[ty][tx]; E_C[index] = e; W_C[index] = w; S_C[index] = s; N_C[index] = n; } __global__ void srad2( float *E_C, float *W_C, 
float *N_C, float *S_C, float *J_cuda, float *C_cuda, int cols, int rows, float lambda, float q0sqr ) { //block id int bx = blockIdx.x; int by = blockIdx.y; //thread id int tx = threadIdx.x; int ty = threadIdx.y; //indices int index = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + tx; int index_s = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * BLOCK_SIZE + tx; int index_e = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + BLOCK_SIZE; float cc, cn, cs, ce, cw, d_sum; //shared memory allocation __shared__ float south_c[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float east_c[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float c_cuda_temp[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float c_cuda_result[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float temp[BLOCK_SIZE][BLOCK_SIZE]; //load data to shared memory temp[ty][tx] = J_cuda[index]; __syncthreads(); south_c[ty][tx] = C_cuda[index_s]; if ( by == gridDim.y - 1 ){ south_c[ty][tx] = C_cuda[cols * BLOCK_SIZE * (gridDim.y - 1) + BLOCK_SIZE * bx + cols * ( BLOCK_SIZE - 1 ) + tx]; } __syncthreads(); east_c[ty][tx] = C_cuda[index_e]; if ( bx == gridDim.x - 1 ){ east_c[ty][tx] = C_cuda[cols * BLOCK_SIZE * by + BLOCK_SIZE * ( gridDim.x - 1) + cols * ty + BLOCK_SIZE-1]; } __syncthreads(); c_cuda_temp[ty][tx] = C_cuda[index]; __syncthreads(); cc = c_cuda_temp[ty][tx]; if ( ty == BLOCK_SIZE -1 && tx == BLOCK_SIZE - 1){ //se cn = cc; cs = south_c[ty][tx]; cw = cc; ce = east_c[ty][tx]; } else if ( tx == BLOCK_SIZE -1 ){ //e cn = cc; cs = c_cuda_temp[ty+1][tx]; cw = cc; ce = east_c[ty][tx]; } else if ( ty == BLOCK_SIZE -1){ //s cn = cc; cs = south_c[ty][tx]; cw = cc; ce = c_cuda_temp[ty][tx+1]; } else{ //the data elements which are not on the borders cn = cc; cs = c_cuda_temp[ty+1][tx]; cw = cc; ce = c_cuda_temp[ty][tx+1]; } // divergence (equ 58) d_sum = cn * N_C[index] + cs * S_C[index] + cw * W_C[index] + ce * E_C[index]; // image update (equ 61) c_cuda_result[ty][tx] = temp[ty][tx] + 0.25 * lambda * d_sum; __syncthreads(); J_cuda[index] = c_cuda_result[ty][tx]; }
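// Reading guide for the two SRAD kernels above, restating what the code already
// computes (equation numbers follow the in-line comments):
//   g2  = (n^2 + s^2 + w^2 + e^2) / jc^2                 -- squared gradient magnitude
//   l   = (n + s + w + e) / jc                           -- Laplacian term
//   q^2 = (0.5*g2 - (1/16)*l^2) / (1 + 0.25*l)^2
//   c   = 1 / (1 + (q^2 - q0sqr) / (q0sqr * (1 + q0sqr)))  -- diffusion coefficient (equ 33), clamped to [0, 1]
// srad2 then applies d_sum = cn*N + cs*S + cw*W + ce*E (divergence, equ 58) and
// the image update J += 0.25 * lambda * d_sum (equ 61).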
ac1d6752490a438310ac812f998575a87aa9e4f8.hip
// !!! This is a file automatically generated by hipify!!! #include <vector> #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/filler.hpp" #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" namespace caffe { template<typename Dtype> void InnerProductLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_ROCM if (M_ == 1) { caffe_gpu_gemv<Dtype>(CblasNoTrans, N_, K_, (Dtype) 1., weight, bottom_data, (Dtype) 0., top_data); if (bias_term_) caffe_gpu_axpy<Dtype>(N_, bias_multiplier_.cpu_data()[0], this->blobs_[1]->gpu_data(), top_data); } else { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype) 1., bottom_data, weight, (Dtype) 0., top_data); if (bias_term_) caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype) 1., bias_multiplier_.gpu_data(), this->blobs_[1]->gpu_data(), (Dtype) 1., top_data); } #endif // USE CUDA } else { #ifdef USE_GREENTEA if (M_ == 1) { greentea_gpu_gemv<Dtype>(this->device_context_->id(), CblasNoTrans, N_, K_, (Dtype) 1., (cl_mem) weight, 0, (cl_mem) bottom_data, 0, (Dtype) 0., (cl_mem) top_data, 0); if (bias_term_) greentea_gpu_axpy<Dtype>(this->device_context_->id(), N_, bias_multiplier_.cpu_data()[0], (cl_mem) (this->blobs_[1]->gpu_data()), 0, (cl_mem) top_data, 0); } else { greentea_gpu_gemm<Dtype>(this->device_context_->id(), CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype) 1., (cl_mem) bottom_data, 0, (cl_mem) weight, 0, (Dtype) 0., (cl_mem) top_data, 0); if (bias_term_) greentea_gpu_gemm<Dtype>(this->device_context_->id(), CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype) 1., (cl_mem) (bias_multiplier_.gpu_data()), 0, (cl_mem) (this->blobs_[1]->gpu_data()), 0, (Dtype) 1., (cl_mem) top_data, 0); } #endif // USE_GREENTEA } } template<typename Dtype> void InnerProductLayer<Dtype>::Backward_gpu( const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_ROCM if (this->param_propagate_down_[0]) { const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* bottom_data = bottom[0]->gpu_data(); // Gradient with respect to weight caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype) 1., top_diff, bottom_data, (Dtype) 1., this->blobs_[0]->mutable_gpu_diff()); } if (bias_term_ && this->param_propagate_down_[1]) { const Dtype* top_diff = top[0]->gpu_diff(); // Gradient with respect to bias caffe_gpu_gemv<Dtype>(CblasTrans, M_, N_, (Dtype) 1., top_diff, bias_multiplier_.gpu_data(), (Dtype) 1., this->blobs_[1]->mutable_gpu_diff()); } if (propagate_down[0]) { const Dtype* top_diff = top[0]->gpu_diff(); // Gradient with respect to bottom data caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype) 1., top_diff, this->blobs_[0]->gpu_data(), (Dtype) 0., bottom[0]->mutable_gpu_diff()); } #endif // USE_ROCM } else { #ifdef USE_GREENTEA if (this->param_propagate_down_[0]) { const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* bottom_data = bottom[0]->gpu_data(); // Gradient with respect to weight greentea_gpu_gemm<Dtype>(this->device_context_->id(), CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype) 1., (cl_mem) top_diff, 0, (cl_mem) bottom_data, 0, (Dtype) 1., (cl_mem) 
(this->blobs_[0]->mutable_gpu_diff()), 0); } if (bias_term_ && this->param_propagate_down_[1]) { const Dtype* top_diff = top[0]->gpu_diff(); // Gradient with respect to bias greentea_gpu_gemv<Dtype>(this->device_context_->id(), CblasTrans, M_, N_, (Dtype) 1., (cl_mem) top_diff, 0, (cl_mem) (bias_multiplier_.gpu_data()), 0, (Dtype) 1., (cl_mem) (this->blobs_[1]->mutable_gpu_diff()), 0); } if (propagate_down[0]) { const Dtype* top_diff = top[0]->gpu_diff(); // Gradient with respect to bottom data greentea_gpu_gemm<Dtype>(this->device_context_->id(), CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype) 1., (cl_mem) top_diff, 0, (cl_mem) (this->blobs_[0]->gpu_data()), 0, (Dtype) 0., (cl_mem) (bottom[0]->mutable_gpu_diff()), 0); } #endif // USE_GREENTEA } } INSTANTIATE_LAYER_GPU_FUNCS(InnerProductLayer); } // namespace caffe
ac1d6752490a438310ac812f998575a87aa9e4f8.cu
#include <vector> #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/filler.hpp" #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" namespace caffe { template<typename Dtype> void InnerProductLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA if (M_ == 1) { caffe_gpu_gemv<Dtype>(CblasNoTrans, N_, K_, (Dtype) 1., weight, bottom_data, (Dtype) 0., top_data); if (bias_term_) caffe_gpu_axpy<Dtype>(N_, bias_multiplier_.cpu_data()[0], this->blobs_[1]->gpu_data(), top_data); } else { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype) 1., bottom_data, weight, (Dtype) 0., top_data); if (bias_term_) caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype) 1., bias_multiplier_.gpu_data(), this->blobs_[1]->gpu_data(), (Dtype) 1., top_data); } #endif // USE CUDA } else { #ifdef USE_GREENTEA if (M_ == 1) { greentea_gpu_gemv<Dtype>(this->device_context_->id(), CblasNoTrans, N_, K_, (Dtype) 1., (cl_mem) weight, 0, (cl_mem) bottom_data, 0, (Dtype) 0., (cl_mem) top_data, 0); if (bias_term_) greentea_gpu_axpy<Dtype>(this->device_context_->id(), N_, bias_multiplier_.cpu_data()[0], (cl_mem) (this->blobs_[1]->gpu_data()), 0, (cl_mem) top_data, 0); } else { greentea_gpu_gemm<Dtype>(this->device_context_->id(), CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype) 1., (cl_mem) bottom_data, 0, (cl_mem) weight, 0, (Dtype) 0., (cl_mem) top_data, 0); if (bias_term_) greentea_gpu_gemm<Dtype>(this->device_context_->id(), CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype) 1., (cl_mem) (bias_multiplier_.gpu_data()), 0, (cl_mem) (this->blobs_[1]->gpu_data()), 0, (Dtype) 1., (cl_mem) top_data, 0); } #endif // USE_GREENTEA } } template<typename Dtype> void InnerProductLayer<Dtype>::Backward_gpu( const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA if (this->param_propagate_down_[0]) { const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* bottom_data = bottom[0]->gpu_data(); // Gradient with respect to weight caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype) 1., top_diff, bottom_data, (Dtype) 1., this->blobs_[0]->mutable_gpu_diff()); } if (bias_term_ && this->param_propagate_down_[1]) { const Dtype* top_diff = top[0]->gpu_diff(); // Gradient with respect to bias caffe_gpu_gemv<Dtype>(CblasTrans, M_, N_, (Dtype) 1., top_diff, bias_multiplier_.gpu_data(), (Dtype) 1., this->blobs_[1]->mutable_gpu_diff()); } if (propagate_down[0]) { const Dtype* top_diff = top[0]->gpu_diff(); // Gradient with respect to bottom data caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype) 1., top_diff, this->blobs_[0]->gpu_data(), (Dtype) 0., bottom[0]->mutable_gpu_diff()); } #endif // USE_CUDA } else { #ifdef USE_GREENTEA if (this->param_propagate_down_[0]) { const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* bottom_data = bottom[0]->gpu_data(); // Gradient with respect to weight greentea_gpu_gemm<Dtype>(this->device_context_->id(), CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype) 1., (cl_mem) top_diff, 0, (cl_mem) bottom_data, 0, (Dtype) 1., (cl_mem) (this->blobs_[0]->mutable_gpu_diff()), 0); } if (bias_term_ && 
this->param_propagate_down_[1]) { const Dtype* top_diff = top[0]->gpu_diff(); // Gradient with respect to bias greentea_gpu_gemv<Dtype>(this->device_context_->id(), CblasTrans, M_, N_, (Dtype) 1., (cl_mem) top_diff, 0, (cl_mem) (bias_multiplier_.gpu_data()), 0, (Dtype) 1., (cl_mem) (this->blobs_[1]->mutable_gpu_diff()), 0); } if (propagate_down[0]) { const Dtype* top_diff = top[0]->gpu_diff(); // Gradient with respect to bottom data greentea_gpu_gemm<Dtype>(this->device_context_->id(), CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype) 1., (cl_mem) top_diff, 0, (cl_mem) (this->blobs_[0]->gpu_data()), 0, (Dtype) 0., (cl_mem) (bottom[0]->mutable_gpu_diff()), 0); } #endif // USE_GREENTEA } } INSTANTIATE_LAYER_GPU_FUNCS(InnerProductLayer); } // namespace caffe
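// Shape summary for the GEMM calls above (M = batch size, K = input features,
// N = output features, the usual Caffe InnerProduct convention):
//   forward:   top(MxN)      = bottom(MxK) * W(NxK)^T  + 1(Mx1) * b(1xN)
//   backward:  dW(NxK)      += top_diff(MxN)^T * bottom(MxK)
//              db(N)        += top_diff(MxN)^T * 1(M)
//              dbottom(MxK)  = top_diff(MxN) * W(NxK)
// The CUDA and GreenTea branches perform the same algebra; only the BLAS entry
// points differ.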
acb31483215a8e42462b0cbc0962b3a58dafbf13.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "momentum_sgd_op.h" #include "caffe2/core/common_gpu.h" #include "caffe2/core/context_gpu.h" namespace caffe2 { __global__ void MomentumSGDKernel( const int N, const float* g, const float* m, float* ng, float* nm, const float* lr, const float momentum, const bool nesterov, float* param) { const float LR = lr[0]; if (!nesterov) { CUDA_1D_KERNEL_LOOP(i, N) { const float adjusted_gradient = LR * g[i] + momentum * m[i]; nm[i] = adjusted_gradient; ng[i] = adjusted_gradient; if (param) { param[i] -= adjusted_gradient; } } } else { CUDA_1D_KERNEL_LOOP(i, N) { const float mi = m[i]; const float mi_new = momentum * mi + LR * g[i]; nm[i] = mi_new; ng[i] = (1 + momentum) * mi_new - momentum * mi; if (param) { param[i] -= ng[i]; } } } } template <> void momentum_sgd_update<CUDAContext>( const int N, const float* g, const float* m, float* ng, float* nm, const float* lr, const float momentum, const bool nesterov, float* param, CUDAContext* context) { hipLaunchKernelGGL(( MomentumSGDKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), N, g, m, ng, nm, lr, momentum, nesterov, param); } template <typename SIndex> __global__ void SparseMomentumSGDKernel( const size_t N, const size_t sz, const float momentum, const bool nesterov, float *param, float *param_mom, const SIndex *indices, const float *gradIn, float *gradOut, const float *lr) { const float LR = lr[0]; CUDA_1D_KERNEL_LOOP(i, N) { const size_t gradIdx = i; const SIndex index = indices[i / sz]; const size_t paramIdx = index * sz + (i % sz); if (!nesterov) { const float adjusted_gradient = LR * gradIn[gradIdx] + momentum * param_mom[paramIdx]; gradOut[gradIdx] = adjusted_gradient; param_mom[paramIdx] = adjusted_gradient; param[paramIdx] -= adjusted_gradient; } else { const float mom_old = param_mom[paramIdx]; const float mom_new = LR * gradIn[gradIdx] + momentum * mom_old; param_mom[paramIdx] = mom_new; const float adjusted_gradient = (1 + momentum) * mom_new - momentum * mom_old; gradOut[gradIdx] = adjusted_gradient; param[paramIdx] -= adjusted_gradient; } } } // Specialization of DoRunWithType for CUDA template <> template <typename SIndex> bool SparseMomentumSGDUpdateOp<float, CUDAContext>::DoRunWithType() { auto N = Input(GRAD).size(); auto grad_slice_sz = Input(GRAD).size_from_dim(Input(INDICES).ndim()); hipLaunchKernelGGL(( SparseMomentumSGDKernel<SIndex>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, grad_slice_sz, momentum_, nesterov_, Output(OUTPUT_PARAM)->template mutable_data<float>(), Output(OUTPUT_MOMENTUM)->template mutable_data<float>(), Input(INDICES).template data<SIndex>(), Input(GRAD).template data<float>(), Output(OUTPUT_GRAD)->template mutable_data<float>(), Input(LR).template data<float>()); return true; } REGISTER_CUDA_OPERATOR(MomentumSGD, MomentumSGDOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(MomentumSGDUpdate, MomentumSGDUpdateOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(SparseMomentumSGDUpdate, SparseMomentumSGDUpdateOp<float, CUDAContext>); }
acb31483215a8e42462b0cbc0962b3a58dafbf13.cu
#include "momentum_sgd_op.h" #include "caffe2/core/common_gpu.h" #include "caffe2/core/context_gpu.h" namespace caffe2 { __global__ void MomentumSGDKernel( const int N, const float* g, const float* m, float* ng, float* nm, const float* lr, const float momentum, const bool nesterov, float* param) { const float LR = lr[0]; if (!nesterov) { CUDA_1D_KERNEL_LOOP(i, N) { const float adjusted_gradient = LR * g[i] + momentum * m[i]; nm[i] = adjusted_gradient; ng[i] = adjusted_gradient; if (param) { param[i] -= adjusted_gradient; } } } else { CUDA_1D_KERNEL_LOOP(i, N) { const float mi = m[i]; const float mi_new = momentum * mi + LR * g[i]; nm[i] = mi_new; ng[i] = (1 + momentum) * mi_new - momentum * mi; if (param) { param[i] -= ng[i]; } } } } template <> void momentum_sgd_update<CUDAContext>( const int N, const float* g, const float* m, float* ng, float* nm, const float* lr, const float momentum, const bool nesterov, float* param, CUDAContext* context) { MomentumSGDKernel<<< CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( N, g, m, ng, nm, lr, momentum, nesterov, param); } template <typename SIndex> __global__ void SparseMomentumSGDKernel( const size_t N, const size_t sz, const float momentum, const bool nesterov, float *param, float *param_mom, const SIndex *indices, const float *gradIn, float *gradOut, const float *lr) { const float LR = lr[0]; CUDA_1D_KERNEL_LOOP(i, N) { const size_t gradIdx = i; const SIndex index = indices[i / sz]; const size_t paramIdx = index * sz + (i % sz); if (!nesterov) { const float adjusted_gradient = LR * gradIn[gradIdx] + momentum * param_mom[paramIdx]; gradOut[gradIdx] = adjusted_gradient; param_mom[paramIdx] = adjusted_gradient; param[paramIdx] -= adjusted_gradient; } else { const float mom_old = param_mom[paramIdx]; const float mom_new = LR * gradIn[gradIdx] + momentum * mom_old; param_mom[paramIdx] = mom_new; const float adjusted_gradient = (1 + momentum) * mom_new - momentum * mom_old; gradOut[gradIdx] = adjusted_gradient; param[paramIdx] -= adjusted_gradient; } } } // Specialization of DoRunWithType for CUDA template <> template <typename SIndex> bool SparseMomentumSGDUpdateOp<float, CUDAContext>::DoRunWithType() { auto N = Input(GRAD).size(); auto grad_slice_sz = Input(GRAD).size_from_dim(Input(INDICES).ndim()); SparseMomentumSGDKernel<SIndex><<< CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, grad_slice_sz, momentum_, nesterov_, Output(OUTPUT_PARAM)->template mutable_data<float>(), Output(OUTPUT_MOMENTUM)->template mutable_data<float>(), Input(INDICES).template data<SIndex>(), Input(GRAD).template data<float>(), Output(OUTPUT_GRAD)->template mutable_data<float>(), Input(LR).template data<float>()); return true; } REGISTER_CUDA_OPERATOR(MomentumSGD, MomentumSGDOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(MomentumSGDUpdate, MomentumSGDUpdateOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(SparseMomentumSGDUpdate, SparseMomentumSGDUpdateOp<float, CUDAContext>); }
020cff9a81f6d220cf070754d8803066f7b02096.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h> #include <tuple> template <typename scalar_t> __global__ void InterpFaceAttrsForwardKernel( const int64_t* __restrict__ pix_to_face, // (P,) const scalar_t* __restrict__ barycentric_coords, // (P, 3) const scalar_t* __restrict__ face_attrs, // (F, 3, D) scalar_t* pix_attrs, // (P, D) const size_t P, const size_t F, const size_t D) { const int tid = threadIdx.x + blockIdx.x * blockDim.x; const int num_threads = blockDim.x * gridDim.x; for (int pd = tid; pd < P * D; pd += num_threads) { const int p = pd / D; const int d = pd % D; const int64_t f = pix_to_face[p]; if (f < 0) { continue; } scalar_t pix_attr = 0.0; for (int i = 0; i < 3; ++i) { scalar_t weight = barycentric_coords[p * 3 + i]; scalar_t vert_attr = face_attrs[f * 3 * D + i * D + d]; pix_attr += weight * vert_attr; } pix_attrs[p * D + d] = pix_attr; } } at::Tensor InterpFaceAttrsForwardCuda( const at::Tensor& pix_to_face, const at::Tensor& barycentric_coords, const at::Tensor& face_attrs) { // Make sure all inputs are on the same device at::TensorArg pix_to_face_t{pix_to_face, "pix_to_face", 1}, barycentric_coords_t{barycentric_coords, "barycentric_coords", 2}, face_attrs_t{face_attrs, "face_attributes", 3}; at::CheckedFrom c = "InterpFaceAttrsForwardCuda"; at::checkAllSameGPU(c, {pix_to_face_t, barycentric_coords_t, face_attrs_t}); at::checkAllSameType(c, {barycentric_coords_t, face_attrs_t}); // Set the device for the kernel launch based on the input at::hip::HIPGuardMasqueradingAsCUDA device_guard(pix_to_face.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); const auto P = pix_to_face.size(0); const auto F = face_attrs.size(0); const auto D = face_attrs.size(2); TORCH_CHECK( barycentric_coords.size(0) == P && barycentric_coords.size(1) == 3, "barycentric_coords must have size (P, 3)"); TORCH_CHECK(face_attrs.size(1) == 3, "face_attrs must have size (F, 3, D)"); auto pix_attrs = at::zeros({P, D}, face_attrs.options()); const int threads = 1024; const int blocks = 512; AT_DISPATCH_FLOATING_TYPES( face_attrs.scalar_type(), "interp_face_attrs_cuda", ([&] { hipLaunchKernelGGL(( InterpFaceAttrsForwardKernel), dim3(blocks), dim3(threads), 0, stream, pix_to_face.contiguous().data_ptr<int64_t>(), barycentric_coords.contiguous().data_ptr<scalar_t>(), face_attrs.contiguous().data_ptr<scalar_t>(), pix_attrs.contiguous().data_ptr<scalar_t>(), P, F, D); })); AT_CUDA_CHECK(hipGetLastError()); return pix_attrs; } template <typename scalar_t> __global__ void InterpFaceAttrsBackwardKernel( const int64_t* __restrict__ pix_to_face, // (P,) const scalar_t* __restrict__ barycentric_coords, // (P, 3) const scalar_t* __restrict__ face_attrs, // (F, 3, D) const scalar_t* __restrict__ grad_pix_attrs, // (P, D) scalar_t* __restrict__ grad_barycentric_coords, // (P, 3) scalar_t* __restrict__ grad_face_attrs, // (F, 3, D) const size_t P, const size_t F, const size_t D) { const int tid = threadIdx.x + blockIdx.x * blockDim.x; const int num_threads = blockDim.x * gridDim.x; for (int pd = tid; pd < P * D; pd += num_threads) { const int p = pd / D; const int d = pd % D; const int64_t f = pix_to_face[p]; if (f < 0) { 
continue; } scalar_t upstream_grad = grad_pix_attrs[p * D + d]; for (int i = 0; i < 3; ++i) { scalar_t weight = barycentric_coords[p * 3 + i]; scalar_t vert_attr = face_attrs[f * 3 * D + i * D + d]; scalar_t grad_bary_down = vert_attr * upstream_grad; scalar_t grad_face_down = weight * upstream_grad; atomicAdd(grad_barycentric_coords + p * 3 + i, grad_bary_down); atomicAdd(grad_face_attrs + f * 3 * D + i * D + d, grad_face_down); } } } std::tuple<at::Tensor, at::Tensor> InterpFaceAttrsBackwardCuda( const at::Tensor& pix_to_face, const at::Tensor& barycentric_coords, const at::Tensor& face_attrs, const at::Tensor& grad_pix_attrs) { // Make sure all inputs are on the same device at::TensorArg pix_to_face_t{pix_to_face, "pix_to_face", 1}, barycentric_coords_t{barycentric_coords, "barycentric_coords", 2}, face_attrs_t{face_attrs, "face_attributes", 3}, grad_pix_attrs_t{grad_pix_attrs, "pix_attrs", 4}; at::CheckedFrom c = "InterpFaceAttrsBackwarduda"; at::checkAllSameGPU( c, {pix_to_face_t, barycentric_coords_t, face_attrs_t, grad_pix_attrs_t}); at::checkAllSameType( c, {barycentric_coords_t, face_attrs_t, grad_pix_attrs_t}); // Set the device for the kernel launch based on the input at::hip::HIPGuardMasqueradingAsCUDA device_guard(pix_to_face.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); const auto P = pix_to_face.size(0); const auto F = face_attrs.size(0); const auto D = face_attrs.size(2); TORCH_CHECK( barycentric_coords.size(0) == P && barycentric_coords.size(1) == 3, "barycentric_coords must have size (P, 3)"); TORCH_CHECK(face_attrs.size(1) == 3, "face_attrs must have size (F, 3, D)"); TORCH_CHECK( grad_pix_attrs.size(0) == P && grad_pix_attrs.size(1) == D, "grad_pix_attrs must have size (P, D)"); auto grad_barycentric_coords = at::zeros_like(barycentric_coords); auto grad_face_attrs = at::zeros_like(face_attrs); const int threads = 1024; const int blocks = 512; // Only allow float for now. // TODO: Add support for double once we fix atomicAdd // clang-format off hipLaunchKernelGGL(( InterpFaceAttrsBackwardKernel), dim3(blocks), dim3(threads), 0, stream, pix_to_face.contiguous().data_ptr<int64_t>(), barycentric_coords.contiguous().data_ptr<float>(), face_attrs.contiguous().data_ptr<float>(), grad_pix_attrs.contiguous().data_ptr<float>(), grad_barycentric_coords.contiguous().data_ptr<float>(), grad_face_attrs.contiguous().data_ptr<float>(), P, F, D); AT_CUDA_CHECK(hipGetLastError()); // clang-format on return std::make_tuple(grad_barycentric_coords, grad_face_attrs); }
020cff9a81f6d220cf070754d8803066f7b02096.cu
/* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <c10/cuda/CUDAGuard.h> #include <tuple> template <typename scalar_t> __global__ void InterpFaceAttrsForwardKernel( const int64_t* __restrict__ pix_to_face, // (P,) const scalar_t* __restrict__ barycentric_coords, // (P, 3) const scalar_t* __restrict__ face_attrs, // (F, 3, D) scalar_t* pix_attrs, // (P, D) const size_t P, const size_t F, const size_t D) { const int tid = threadIdx.x + blockIdx.x * blockDim.x; const int num_threads = blockDim.x * gridDim.x; for (int pd = tid; pd < P * D; pd += num_threads) { const int p = pd / D; const int d = pd % D; const int64_t f = pix_to_face[p]; if (f < 0) { continue; } scalar_t pix_attr = 0.0; for (int i = 0; i < 3; ++i) { scalar_t weight = barycentric_coords[p * 3 + i]; scalar_t vert_attr = face_attrs[f * 3 * D + i * D + d]; pix_attr += weight * vert_attr; } pix_attrs[p * D + d] = pix_attr; } } at::Tensor InterpFaceAttrsForwardCuda( const at::Tensor& pix_to_face, const at::Tensor& barycentric_coords, const at::Tensor& face_attrs) { // Make sure all inputs are on the same device at::TensorArg pix_to_face_t{pix_to_face, "pix_to_face", 1}, barycentric_coords_t{barycentric_coords, "barycentric_coords", 2}, face_attrs_t{face_attrs, "face_attributes", 3}; at::CheckedFrom c = "InterpFaceAttrsForwardCuda"; at::checkAllSameGPU(c, {pix_to_face_t, barycentric_coords_t, face_attrs_t}); at::checkAllSameType(c, {barycentric_coords_t, face_attrs_t}); // Set the device for the kernel launch based on the input at::cuda::CUDAGuard device_guard(pix_to_face.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); const auto P = pix_to_face.size(0); const auto F = face_attrs.size(0); const auto D = face_attrs.size(2); TORCH_CHECK( barycentric_coords.size(0) == P && barycentric_coords.size(1) == 3, "barycentric_coords must have size (P, 3)"); TORCH_CHECK(face_attrs.size(1) == 3, "face_attrs must have size (F, 3, D)"); auto pix_attrs = at::zeros({P, D}, face_attrs.options()); const int threads = 1024; const int blocks = 512; AT_DISPATCH_FLOATING_TYPES( face_attrs.scalar_type(), "interp_face_attrs_cuda", ([&] { InterpFaceAttrsForwardKernel<<<blocks, threads, 0, stream>>>( pix_to_face.contiguous().data_ptr<int64_t>(), barycentric_coords.contiguous().data_ptr<scalar_t>(), face_attrs.contiguous().data_ptr<scalar_t>(), pix_attrs.contiguous().data_ptr<scalar_t>(), P, F, D); })); AT_CUDA_CHECK(cudaGetLastError()); return pix_attrs; } template <typename scalar_t> __global__ void InterpFaceAttrsBackwardKernel( const int64_t* __restrict__ pix_to_face, // (P,) const scalar_t* __restrict__ barycentric_coords, // (P, 3) const scalar_t* __restrict__ face_attrs, // (F, 3, D) const scalar_t* __restrict__ grad_pix_attrs, // (P, D) scalar_t* __restrict__ grad_barycentric_coords, // (P, 3) scalar_t* __restrict__ grad_face_attrs, // (F, 3, D) const size_t P, const size_t F, const size_t D) { const int tid = threadIdx.x + blockIdx.x * blockDim.x; const int num_threads = blockDim.x * gridDim.x; for (int pd = tid; pd < P * D; pd += num_threads) { const int p = pd / D; const int d = pd % D; const int64_t f = pix_to_face[p]; if (f < 0) { continue; } scalar_t upstream_grad = grad_pix_attrs[p * D + d]; for (int i = 0; i < 3; ++i) { scalar_t weight = barycentric_coords[p * 3 + i]; scalar_t vert_attr = face_attrs[f * 
3 * D + i * D + d]; scalar_t grad_bary_down = vert_attr * upstream_grad; scalar_t grad_face_down = weight * upstream_grad; atomicAdd(grad_barycentric_coords + p * 3 + i, grad_bary_down); atomicAdd(grad_face_attrs + f * 3 * D + i * D + d, grad_face_down); } } } std::tuple<at::Tensor, at::Tensor> InterpFaceAttrsBackwardCuda( const at::Tensor& pix_to_face, const at::Tensor& barycentric_coords, const at::Tensor& face_attrs, const at::Tensor& grad_pix_attrs) { // Make sure all inputs are on the same device at::TensorArg pix_to_face_t{pix_to_face, "pix_to_face", 1}, barycentric_coords_t{barycentric_coords, "barycentric_coords", 2}, face_attrs_t{face_attrs, "face_attributes", 3}, grad_pix_attrs_t{grad_pix_attrs, "pix_attrs", 4}; at::CheckedFrom c = "InterpFaceAttrsBackwarduda"; at::checkAllSameGPU( c, {pix_to_face_t, barycentric_coords_t, face_attrs_t, grad_pix_attrs_t}); at::checkAllSameType( c, {barycentric_coords_t, face_attrs_t, grad_pix_attrs_t}); // Set the device for the kernel launch based on the input at::cuda::CUDAGuard device_guard(pix_to_face.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); const auto P = pix_to_face.size(0); const auto F = face_attrs.size(0); const auto D = face_attrs.size(2); TORCH_CHECK( barycentric_coords.size(0) == P && barycentric_coords.size(1) == 3, "barycentric_coords must have size (P, 3)"); TORCH_CHECK(face_attrs.size(1) == 3, "face_attrs must have size (F, 3, D)"); TORCH_CHECK( grad_pix_attrs.size(0) == P && grad_pix_attrs.size(1) == D, "grad_pix_attrs must have size (P, D)"); auto grad_barycentric_coords = at::zeros_like(barycentric_coords); auto grad_face_attrs = at::zeros_like(face_attrs); const int threads = 1024; const int blocks = 512; // Only allow float for now. // TODO: Add support for double once we fix atomicAdd // clang-format off InterpFaceAttrsBackwardKernel<<<blocks, threads, 0, stream>>>( pix_to_face.contiguous().data_ptr<int64_t>(), barycentric_coords.contiguous().data_ptr<float>(), face_attrs.contiguous().data_ptr<float>(), grad_pix_attrs.contiguous().data_ptr<float>(), grad_barycentric_coords.contiguous().data_ptr<float>(), grad_face_attrs.contiguous().data_ptr<float>(), P, F, D); AT_CUDA_CHECK(cudaGetLastError()); // clang-format on return std::make_tuple(grad_barycentric_coords, grad_face_attrs); }
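// What the two kernels above compute, per pixel p with hit face f = pix_to_face[p]
// and barycentric weights w_0..w_2 (pixels with f < 0 are skipped):
//   forward:   pix_attrs[p, d]      = sum_i w_i * face_attrs[f, i, d]
//   backward:  d(bary)[p, i]       += face_attrs[f, i, d] * grad_pix_attrs[p, d]
//              d(face)[f, i, d]    += w_i * grad_pix_attrs[p, d]
// The backward accumulations use atomicAdd because several pixels can map to the
// same face, which is also why the backward path is float-only for now.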
7e7c897f8a9d78120be68bd74055c160d0f6d43f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../SDDK/GPU/cuda_common.h" #include "../SDDK/GPU/cuda.hpp" __global__ void sum_q_pw_dm_pw_gpu_kernel ( int nbf__, double const* q_pw__, double const* dm_pw__, double const* sym_weight__, hipDoubleComplex* rho_pw__ ) { extern __shared__ char sdata_ptr[]; double* rho_re = (double*)&sdata_ptr[0]; double* rho_im = (double*)&sdata_ptr[sizeof(double) * blockDim.x]; int igloc = blockIdx.x; rho_re[threadIdx.x] = 0; rho_im[threadIdx.x] = 0; int ld = nbf__ * (nbf__ + 1) / 2; int N = num_blocks(ld, blockDim.x); for (int n = 0; n < N; n++) { int i = n * blockDim.x + threadIdx.x; if (i < ld) { double qx = q_pw__[array2D_offset(i, 2 * igloc, ld)]; double qy = q_pw__[array2D_offset(i, 2 * igloc + 1, ld)]; double dx = dm_pw__[array2D_offset(i, 2 * igloc, ld)]; double dy = dm_pw__[array2D_offset(i, 2 * igloc + 1, ld)]; rho_re[threadIdx.x] += sym_weight__[i] * (dx * qx - dy * qy); rho_im[threadIdx.x] += sym_weight__[i] * (dy * qx + dx * qy); } } __syncthreads(); for (int s = 1; s < blockDim.x; s *= 2) { if (threadIdx.x % (2 * s) == 0) { rho_re[threadIdx.x] = rho_re[threadIdx.x] + rho_re[threadIdx.x + s]; rho_im[threadIdx.x] = rho_im[threadIdx.x] + rho_im[threadIdx.x + s]; } __syncthreads(); } if (threadIdx.x == 0) { rho_pw__[igloc] = cuCadd(rho_pw__[igloc], make_cuDoubleComplex(rho_re[0], rho_im[0])); } } extern "C" void sum_q_pw_dm_pw_gpu(int num_gvec_loc__, int nbf__, double const* q_pw__, double const* dm_pw__, double const* sym_weight__, hipDoubleComplex* rho_pw__, int stream_id__) { CUDA_timer t("sum_q_pw_dm_pw_gpu"); hipStream_t stream = acc::stream(stream_id__); dim3 grid_t(64); dim3 grid_b(num_gvec_loc__); hipLaunchKernelGGL(( sum_q_pw_dm_pw_gpu_kernel) , dim3(grid_b), dim3(grid_t), 2 * grid_t.x * sizeof(double), stream, nbf__, q_pw__, dm_pw__, sym_weight__, rho_pw__ ); }
7e7c897f8a9d78120be68bd74055c160d0f6d43f.cu
#include "../SDDK/GPU/cuda_common.h" #include "../SDDK/GPU/cuda.hpp" __global__ void sum_q_pw_dm_pw_gpu_kernel ( int nbf__, double const* q_pw__, double const* dm_pw__, double const* sym_weight__, cuDoubleComplex* rho_pw__ ) { extern __shared__ char sdata_ptr[]; double* rho_re = (double*)&sdata_ptr[0]; double* rho_im = (double*)&sdata_ptr[sizeof(double) * blockDim.x]; int igloc = blockIdx.x; rho_re[threadIdx.x] = 0; rho_im[threadIdx.x] = 0; int ld = nbf__ * (nbf__ + 1) / 2; int N = num_blocks(ld, blockDim.x); for (int n = 0; n < N; n++) { int i = n * blockDim.x + threadIdx.x; if (i < ld) { double qx = q_pw__[array2D_offset(i, 2 * igloc, ld)]; double qy = q_pw__[array2D_offset(i, 2 * igloc + 1, ld)]; double dx = dm_pw__[array2D_offset(i, 2 * igloc, ld)]; double dy = dm_pw__[array2D_offset(i, 2 * igloc + 1, ld)]; rho_re[threadIdx.x] += sym_weight__[i] * (dx * qx - dy * qy); rho_im[threadIdx.x] += sym_weight__[i] * (dy * qx + dx * qy); } } __syncthreads(); for (int s = 1; s < blockDim.x; s *= 2) { if (threadIdx.x % (2 * s) == 0) { rho_re[threadIdx.x] = rho_re[threadIdx.x] + rho_re[threadIdx.x + s]; rho_im[threadIdx.x] = rho_im[threadIdx.x] + rho_im[threadIdx.x + s]; } __syncthreads(); } if (threadIdx.x == 0) { rho_pw__[igloc] = cuCadd(rho_pw__[igloc], make_cuDoubleComplex(rho_re[0], rho_im[0])); } } extern "C" void sum_q_pw_dm_pw_gpu(int num_gvec_loc__, int nbf__, double const* q_pw__, double const* dm_pw__, double const* sym_weight__, cuDoubleComplex* rho_pw__, int stream_id__) { CUDA_timer t("sum_q_pw_dm_pw_gpu"); cudaStream_t stream = acc::stream(stream_id__); dim3 grid_t(64); dim3 grid_b(num_gvec_loc__); sum_q_pw_dm_pw_gpu_kernel <<<grid_b, grid_t, 2 * grid_t.x * sizeof(double), stream>>> ( nbf__, q_pw__, dm_pw__, sym_weight__, rho_pw__ ); }
3f0f03923f93713ee41a217a4e814051f8bdbdc4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernel_hip.cuh" #define THREADS_PER_BLOCK 1024 __global__ void Fadd_kernel(const dtype* x, const dtype* y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] + y[index]; } } void Fadd_impl(const dtype* x, const dtype* y, dtype* r, int size) { hipLaunchKernelGGL(( Fadd_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, size); } __global__ void Fadd_kernel(const dtype* x, dtype** y, dtype* r, int count, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { dtype sum = 0; int offset = 0; for(int idx = 0; idx < count; idx++) { int global = index + offset; int idx = global / size; int idy = global % size; sum += (x[index] + y[idx][idy]); offset += size; } r[index] = sum; } } void Fadd_impl(const dtype* x, dtype** y, dtype* r, int count, int size) { hipLaunchKernelGGL(( Fadd_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, count, size); } __global__ void Fadd_kernel(dtype** x, dtype** y, dtype** r, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int idx = index / dim0; int idy = index % dim0; r[idx][idy] = x[idx][idy] + y[idx][idy]; } } void Fadd_impl(dtype** x, dtype** y, dtype** r, int dim0, int size) { hipLaunchKernelGGL(( Fadd_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, dim0, size); } __global__ void Fsubtract_kernel(const dtype* x, const dtype* y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] - y[index]; } } void Fsubtract_impl(const dtype* x, const dtype* y, dtype* r, int size) { hipLaunchKernelGGL(( Fsubtract_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, size); } __global__ void Fmultiply_kernel(const dtype* x, const dtype* y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] * y[index]; } } void Fmultiply_impl(const dtype* x, const dtype* y, dtype* r, int size) { hipLaunchKernelGGL(( Fmultiply_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, size); } __global__ void Fmultiply_kernel(dtype** x, dtype** y, dtype** r, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int idx = index / dim0; int idy = index % dim0; r[idx][idy] = x[idx][idy] * y[idx][idy]; } } void Fmultiply_impl(dtype** x, dtype** y, dtype** r, int dim0, int size) { hipLaunchKernelGGL(( Fmultiply_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, dim0, size); } __global__ void Fdivide_kernel(const dtype* x, const dtype* y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] / y[index]; } } void Fdivide_impl(const dtype* x, const dtype* y, dtype* r, int size) { hipLaunchKernelGGL(( Fdivide_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, size); } __global__ void Fmultiply_scalar_kernel(const dtype* x, const dtype y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] * y; } } void Fmultiply_scalar_impl(const 
dtype* x, const dtype y, dtype* r, int size) { hipLaunchKernelGGL(( Fmultiply_scalar_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, size); } __global__ void Fadd_scalar_kernel(const dtype* x, const dtype y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] + y; } } void Fadd_scalar_impl(const dtype* x, const dtype y, dtype* r, int size) { hipLaunchKernelGGL(( Fadd_scalar_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, size); } __global__ void Fsquare_kernel(const dtype* x, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] * x[index]; } } void Fsquare_impl(const dtype* x, dtype* r, int size) { hipLaunchKernelGGL(( Fsquare_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, size); } __global__ void Ftanh_kernel(const dtype* x, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = tanh(x[index]); } } void Ftanh_impl(const dtype* x, dtype* r, int size) { hipLaunchKernelGGL(( Ftanh_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, size); } __global__ void Ftanh_kernel(dtype** x, dtype** r, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int idx = index / dim0; int idy = index % dim0; r[idx][idy] = tanh(x[idx][idy]); } } void Ftanh_impl(dtype** x, dtype** r, int dim0, int size) { hipLaunchKernelGGL(( Ftanh_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, dim0, size); } __global__ void Fsigmoid_kernel(dtype** x, dtype** r, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int idx = index / dim0; int idy = index % dim0; r[idx][idy] = 1.0 / (1.0 + exp(-x[idx][idy])); } } void Fsigmoid_impl(dtype** x, dtype** r, int dim0, int size) { hipLaunchKernelGGL(( Fsigmoid_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, dim0, size); } __global__ void Dsigmoid_kernel(dtype** x, dtype** y, dtype** r, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int idx = index / dim0; int idy = index % dim0; r[idx][idy] = (1 - y[idx][idy]) * y[idx][idy]; } } void Dsigmoid_impl(dtype** x, dtype** y, dtype** r, int dim0, int size){ hipLaunchKernelGGL(( Dsigmoid_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, dim0, size); } __global__ void Dtanh_kernel(const dtype* x, const dtype* y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = (1 + y[index]) * (1 - y[index]); } } void Dtanh_impl(const dtype* x, const dtype* y, dtype* r, int size) { hipLaunchKernelGGL(( Dtanh_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, size); } __global__ void Dtanh_kernel(dtype** x, dtype** y, dtype** r, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int idx = index / dim0; int idy = index % dim0; r[idx][idy] = (1 + y[idx][idy]) * (1 - y[idx][idy]); } } void Dtanh_impl(dtype** x, dtype** y, dtype** r, int dim0, int size){ hipLaunchKernelGGL(( Dtanh_kernel), dim3((size + THREADS_PER_BLOCK - 1) / 
THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, dim0, size); } __global__ void Fsigmoid_kernel(const dtype* x, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = 1.0 / (1.0 + exp(-x[index])); } } void Fsigmoid_impl(const dtype* x, dtype* r, int size) { hipLaunchKernelGGL(( Fsigmoid_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, size); } __global__ void Dsigmoid_kernel(const dtype* x, const dtype* y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = (1 - y[index]) * y[index]; } } void Dsigmoid_impl(const dtype* x, const dtype* y, dtype* r, int size) { hipLaunchKernelGGL(( Dsigmoid_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, r, size); } __global__ void Fsqrt_kernel(const dtype* x, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = sqrt(x[index]); } } void Fsqrt_impl(const dtype* x, dtype* r, int size) { hipLaunchKernelGGL(( Fsqrt_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, size); } __global__ void concat_kernel(const dtype *src, dtype* dst, int offset, int dim) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < dim) { dst[offset + index] = src[index]; } } void concat_impl(const dtype *src, dtype* dst, int offset, int dim) { hipLaunchKernelGGL(( concat_kernel), dim3((dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, src, dst, offset, dim); } __global__ void unconcat_kernel(const dtype *src, dtype* dst, int offset, int dim) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < dim) { dst[index] = src[offset + index]; } } void unconcat_impl(const dtype *src, dtype* dst, int offset, int dim) { hipLaunchKernelGGL(( unconcat_kernel), dim3((dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, src, dst, offset, dim); } __global__ void Ftranspose_kernel(const dtype* x, dtype* r, int dim0, int dim1, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index % dim0 * dim1 + index / dim0]; } } void Ftranspose_impl(const dtype* x, dtype* r, int dim0, int dim1, int size) { hipLaunchKernelGGL(( Ftranspose_kernel), dim3((size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, dim0, dim1, size); } __global__ void set_col_kernel(dtype* x, int dim0, int col, int size, dtype val) { int index = threadIdx.x + blockIdx.x * blockDim.x; int i = index + col * dim0; if (i < size && index < dim0) { x[i] = val; } } void set_col_impl(dtype* x, int dim0, int col, int size, dtype val) { hipLaunchKernelGGL(( set_col_kernel), dim3((dim0 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, dim0, col, size, val); } __global__ void FLookup_kernel(const dtype* x, dtype** r, int xdim0, int xdim1, int r_size, int* cols, int col_num) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < r_size) { int col_index = index / xdim0; if(col_index < col_num) { int col = cols[col_index]; int offset = index % xdim0; int x_index = col * xdim0 + offset; if(x_index < xdim0 * xdim1) { r[col_index][offset] = x[x_index]; } } } } void FLookup_impl(const dtype* x, dtype** r, int xdim0, int xdim1, int r_size, int* cols, int col_num) { hipLaunchKernelGGL(( FLookup_kernel), dim3((r_size + THREADS_PER_BLOCK - 1) / 
THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, xdim0, xdim1, r_size, cols, col_num); } __global__ void DLookup_kernel(dtype* gx, dtype** loss, int gxdim0, int gxdim1, int l_size, int* cols, int col_num) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < l_size) { int col_index = index / gxdim0; if(col_index < col_num) { int col = cols[col_index]; int offset = index % gxdim0; int gx_index = col * gxdim0 + offset; if(gx_index < gxdim0 * gxdim1) { atomicAdd(gx + gx_index, loss[col_index][offset]); //gx[gx_index] += loss[col_index][offset]; } } } } void DLookup_impl(dtype* gx, dtype** loss, int gxdim0, int gxdim1, int l_size, int* cols, int col_num) { hipLaunchKernelGGL(( DLookup_kernel), dim3((l_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, gx, loss, gxdim0, gxdim1, l_size, cols, col_num); } __global__ void get_cols_kernel(const dtype* x, dtype* r, int xdim0, int xdim1, int r_size, int* cols, int col_num) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < r_size) { int col_index = index / xdim0; if(col_index < col_num) { int col = cols[col_index]; int offset = index % xdim0; int x_index = col * xdim0 + offset; if(x_index < xdim0 * xdim1) { r[index] = x[x_index]; } } } } void get_cols_impl(const dtype* x, dtype* r, int xdim0, int xdim1, int r_size, int* cols, int col_num) { hipLaunchKernelGGL(( get_cols_kernel), dim3((r_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, xdim0, xdim1, r_size, cols, col_num); } __global__ void get_col_kernel(const dtype* x, dtype* r, int dim0, int col, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; int i = index + col * dim0; if (i < size && index < dim0) { r[index] = x[i]; } } void get_col_impl(const dtype* x, dtype* r, int dim0, int col, int size) { hipLaunchKernelGGL(( get_col_kernel), dim3((dim0 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, r, dim0, col, size); } __global__ void Fadd_col_kernel(dtype* x, const dtype* y, int col, int dim0, int size){ int index = threadIdx.x + blockIdx.x * blockDim.x; int i = index + col * dim0; if (i < size && index < dim0) { x[i] = x[i] + y[index]; } } void Fadd_col_impl(dtype* x, const dtype* y, int col, int dim0, int size) { hipLaunchKernelGGL(( Fadd_col_kernel), dim3((dim0 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, x, y, col, dim0, size); } template<int BLOCK_SIZE> __global__ void Favgpooling_kernel( dtype **px, int skip, int n, dtype *py) { __shared__ dtype temp[BLOCK_SIZE]; const int bid = blockIdx.x; const int tid = threadIdx.x; //px += bid % skip + (bid / skip) * skip * n; int index_start = bid % skip + (bid / skip) * skip * n; temp[tid] = 0; for (int i = tid; i < n; i += BLOCK_SIZE) { int global = index_start + i * skip; int idx = global / skip; int idy = global % skip; temp[tid] += px[idx][idy]; } ::__syncthreads(); #define REDUCE(k) \ if (BLOCK_SIZE >= k << 1) { \ if (tid < k) temp[tid] += temp[tid + k]; \ ::__syncthreads(); \ } REDUCE(512) REDUCE(256) REDUCE(128) REDUCE(64) REDUCE(32) REDUCE(16) REDUCE(8) REDUCE(4) REDUCE(2) REDUCE(1) #undef REDUCE if (tid == 0) py[bid] = temp[0] / n; } void Favgpooling_impl(dtype** x, dtype* y, int n, int r, int s) { int block_size = THREADS_PER_BLOCK; while (block_size >> 1 >= n) block_size >>= 1; switch (block_size) { #define CASE(k) \ case k:hipLaunchKernelGGL(( ::Favgpooling_kernel<k>), dim3(r), dim3(k), 0, 0, x, s, n, y); break CASE(1024); CASE(512); CASE(256); CASE(128); CASE(64); 
CASE(32); CASE(16); CASE(8); CASE(4); CASE(2); CASE(1); #undef CASE } } __global__ void Davgpooling_kernel(const dtype* gy, int gy_size, int gx_size, int n, dtype** gx) { int i = threadIdx.x + blockIdx.x * blockDim.x; if(i < gx_size) { int idx = i / gy_size; int idy = i % gy_size; atomicAdd(gx[idx] + idy, gy[idy] / n); //gx[idx][idy] += (gy[idy] / n); } } void Davgpooling_impl(const dtype* gy, int gy_size, int gx_size, int n, dtype** gx) { hipLaunchKernelGGL(( Davgpooling_kernel), dim3((gx_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, gy, gy_size, gx_size, n, gx); } template<int BLOCK_SIZE> __global__ void Fsumpooling_kernel( dtype **px, int skip, int n, dtype *py) { __shared__ dtype temp[BLOCK_SIZE]; const int bid = blockIdx.x; const int tid = threadIdx.x; //px += bid % skip + (bid / skip) * skip * n; int index_start = bid % skip + (bid / skip) * skip * n; temp[tid] = 0; for (int i = tid; i < n; i += BLOCK_SIZE) { int global = index_start + i * skip; int idx = global / skip; int idy = global % skip; dtype val = px[idx][idy]; temp[tid] += val; } ::__syncthreads(); #define REDUCE(k) \ if (BLOCK_SIZE >= k << 1) { \ if (tid < k) temp[tid] += temp[tid + k]; \ ::__syncthreads(); \ } REDUCE(512) REDUCE(256) REDUCE(128) REDUCE(64) REDUCE(32) REDUCE(16) REDUCE(8) REDUCE(4) REDUCE(2) REDUCE(1) #undef REDUCE if (tid == 0) py[bid] = temp[0]; } void Fsumpooling_impl(dtype** x, dtype* y, int n, int r, int s) { int block_size = THREADS_PER_BLOCK; while (block_size >> 1 >= n) block_size >>= 1; switch (block_size) { #define CASE(k) \ case k:hipLaunchKernelGGL(( ::Fsumpooling_kernel<k>), dim3(r), dim3(k), 0, 0, x, s, n, y); break CASE(1024); CASE(512); CASE(256); CASE(128); CASE(64); CASE(32); CASE(16); CASE(8); CASE(4); CASE(2); CASE(1); #undef CASE } } __global__ void Dsumpooling_kernel(const dtype* gy, int gy_size, int gx_size, dtype** gx) { int i = threadIdx.x + blockIdx.x * blockDim.x; if(i < gx_size) { int idx = i / gy_size; int idy = i % gy_size; atomicAdd(gx[idx] + idy, gy[idy]); //gx[idx][idy] += gy[idy]; } } void Dsumpooling_impl(const dtype* gy, int gy_size, int gx_size, dtype** gx) { hipLaunchKernelGGL(( Dsumpooling_kernel), dim3((gx_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, gy, gy_size, gx_size, gx); } template<int BLOCK_SIZE> __global__ void Fmaxpooling_kernel( dtype **px, int skip, int n, dtype *py, int* index) { __shared__ dtype temp[BLOCK_SIZE]; __shared__ int temp_index[BLOCK_SIZE]; const int bid = blockIdx.x; const int tid = threadIdx.x; //px += bid % skip + (bid / skip) * skip * n; dtype thread_max = NEGATIVE_INFINITY; int index_start = bid % skip + (bid / skip) * skip * n; int index_max; for (int i = tid; i < n; i += BLOCK_SIZE) { int global = index_start + i * skip; int idx = global / skip; int idy = global % skip; dtype val = px[idx][idy]; if(val > thread_max) { thread_max = val; index_max = index_start + i * skip; } } temp[tid] = thread_max; temp_index[tid] = index_max; ::__syncthreads(); #define REDUCE(k) \ if (BLOCK_SIZE >= k << 1) { \ if (tid < k) if(temp[tid + k] > temp[tid]) {temp[tid] = temp[tid + k]; temp_index[tid] = temp_index[tid + k];} \ ::__syncthreads(); \ } REDUCE(512) REDUCE(256) REDUCE(128) REDUCE(64) REDUCE(32) REDUCE(16) REDUCE(8) REDUCE(4) REDUCE(2) REDUCE(1) #undef REDUCE if (tid == 0) {py[bid] = temp[0]; index[bid] = temp_index[0];} } void Fmaxpooling_impl(dtype** x, dtype* y, int n, int r, int s, int* index){ int block_size = THREADS_PER_BLOCK; while (block_size >> 1 >= n) block_size 
>>= 1; switch (block_size) { #define CASE(k) \ case k:hipLaunchKernelGGL(( ::Fmaxpooling_kernel<k>), dim3(r), dim3(k), 0, 0, x, s, n, y, index); break CASE(1024); CASE(512); CASE(256); CASE(128); CASE(64); CASE(32); CASE(16); CASE(8); CASE(4); CASE(2); CASE(1); #undef CASE } } __global__ void Dmaxpooling_kernel(const dtype* gy, dtype** gx, int* index, int dim) { int i = threadIdx.x + blockIdx.x * blockDim.x; if(i < dim) { int idx = index[i] / dim; int idy = index[i] % dim; atomicAdd(gx[idx] + idy, gy[i]); //gx[idx][idy] += gy[i]; } } void Dmaxpooling_impl(const dtype* gy, dtype** gx, int* index, int dim) { hipLaunchKernelGGL(( Dmaxpooling_kernel), dim3((dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, gy, gx, index, dim); } template<int BLOCK_SIZE> __global__ void Fminpooling_kernel( dtype **px, int skip, int n, dtype *py, int* index) { __shared__ dtype temp[BLOCK_SIZE]; __shared__ int temp_index[BLOCK_SIZE]; const int bid = blockIdx.x; const int tid = threadIdx.x; //px += bid % skip + (bid / skip) * skip * n; dtype thread_min = POSITIVE_INFINITY; int index_start = bid % skip + (bid / skip) * skip * n; int index_min; for (int i = tid; i < n; i += BLOCK_SIZE) { int global = index_start + i * skip; int idx = global / skip; int idy = global % skip; dtype val = px[idx][idy]; if(val < thread_min) { thread_min = val; index_min = index_start + i * skip; } } temp[tid] = thread_min; temp_index[tid] = index_min; ::__syncthreads(); #define REDUCE(k) \ if (BLOCK_SIZE >= k << 1) { \ if (tid < k) if(temp[tid + k] < temp[tid]) {temp[tid] = temp[tid + k]; temp_index[tid] = temp_index[tid + k];} \ ::__syncthreads(); \ } REDUCE(512) REDUCE(256) REDUCE(128) REDUCE(64) REDUCE(32) REDUCE(16) REDUCE(8) REDUCE(4) REDUCE(2) REDUCE(1) #undef REDUCE if (tid == 0) {py[bid] = temp[0]; index[bid] = temp_index[0];} } void Fminpooling_impl(dtype** x, dtype* y, int n, int r, int s, int* index) { int block_size = THREADS_PER_BLOCK; while (block_size >> 1 >= n) block_size >>= 1; switch (block_size) { #define CASE(k) \ case k:hipLaunchKernelGGL(( ::Fminpooling_kernel<k>), dim3(r), dim3(k), 0, 0, x, s, n, y, index); break CASE(1024); CASE(512); CASE(256); CASE(128); CASE(64); CASE(32); CASE(16); CASE(8); CASE(4); CASE(2); CASE(1); #undef CASE } } __global__ void Dminpooling_kernel(const dtype* gy, dtype** gx, int* index, int dim) { int i = threadIdx.x + blockIdx.x * blockDim.x; if(i < dim) { int idx = index[i] / dim; int idy = index[i] % dim; atomicAdd(gx[idx] + idy, gy[i]); //gx[idx][idy] += gy[i]; } } void Dminpooling_impl(const dtype* gy, dtype** gx, int* index, int dim) { hipLaunchKernelGGL(( Dminpooling_kernel), dim3((dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, gy, gx, index, dim); }
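// ---------------------------------------------------------------------------
// Illustrative usage sketch (added for clarity; not part of the original
// kernel_hip file above). It shows how a host caller might drive the
// Fadd_impl wrapper: allocate device buffers, copy the inputs over, call the
// wrapper (one thread per element, grid rounded up to cover `size`), and copy
// the result back. Assumptions: kernel_hip.cuh declares Fadd_impl and defines
// dtype; dtype is taken to be float here purely for illustration.
// ---------------------------------------------------------------------------
#include "hip/hip_runtime.h"
#include "kernel_hip.cuh"
#include <vector>
#include <cstdio>

int example_fadd(int size) {
    std::vector<dtype> hx(size, 1.0f), hy(size, 2.0f), hr(size, 0.0f);
    dtype *dx = nullptr, *dy = nullptr, *dr = nullptr;
    hipMalloc((void**)&dx, size * sizeof(dtype));
    hipMalloc((void**)&dy, size * sizeof(dtype));
    hipMalloc((void**)&dr, size * sizeof(dtype));
    hipMemcpy(dx, hx.data(), size * sizeof(dtype), hipMemcpyHostToDevice);
    hipMemcpy(dy, hy.data(), size * sizeof(dtype), hipMemcpyHostToDevice);

    Fadd_impl(dx, dy, dr, size);   // launches Fadd_kernel on the null stream
    hipDeviceSynchronize();        // optional here: the blocking hipMemcpy below also synchronizes

    hipMemcpy(hr.data(), dr, size * sizeof(dtype), hipMemcpyDeviceToHost);
    printf("r[0] = %f (expected 3.0)\n", (double)hr[0]);
    hipFree(dx); hipFree(dy); hipFree(dr);
    return 0;
}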
3f0f03923f93713ee41a217a4e814051f8bdbdc4.cu
#include "kernel.cuh" #define THREADS_PER_BLOCK 1024 __global__ void Fadd_kernel(const dtype* x, const dtype* y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] + y[index]; } } void Fadd_impl(const dtype* x, const dtype* y, dtype* r, int size) { Fadd_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, size); } __global__ void Fadd_kernel(const dtype* x, dtype** y, dtype* r, int count, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { dtype sum = 0; int offset = 0; for(int idx = 0; idx < count; idx++) { int global = index + offset; int idx = global / size; int idy = global % size; sum += (x[index] + y[idx][idy]); offset += size; } r[index] = sum; } } void Fadd_impl(const dtype* x, dtype** y, dtype* r, int count, int size) { Fadd_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, count, size); } __global__ void Fadd_kernel(dtype** x, dtype** y, dtype** r, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int idx = index / dim0; int idy = index % dim0; r[idx][idy] = x[idx][idy] + y[idx][idy]; } } void Fadd_impl(dtype** x, dtype** y, dtype** r, int dim0, int size) { Fadd_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, dim0, size); } __global__ void Fsubtract_kernel(const dtype* x, const dtype* y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] - y[index]; } } void Fsubtract_impl(const dtype* x, const dtype* y, dtype* r, int size) { Fsubtract_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, size); } __global__ void Fmultiply_kernel(const dtype* x, const dtype* y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] * y[index]; } } void Fmultiply_impl(const dtype* x, const dtype* y, dtype* r, int size) { Fmultiply_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, size); } __global__ void Fmultiply_kernel(dtype** x, dtype** y, dtype** r, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int idx = index / dim0; int idy = index % dim0; r[idx][idy] = x[idx][idy] * y[idx][idy]; } } void Fmultiply_impl(dtype** x, dtype** y, dtype** r, int dim0, int size) { Fmultiply_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, dim0, size); } __global__ void Fdivide_kernel(const dtype* x, const dtype* y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] / y[index]; } } void Fdivide_impl(const dtype* x, const dtype* y, dtype* r, int size) { Fdivide_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, size); } __global__ void Fmultiply_scalar_kernel(const dtype* x, const dtype y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] * y; } } void Fmultiply_scalar_impl(const dtype* x, const dtype y, dtype* r, int size) { Fmultiply_scalar_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, size); } __global__ void Fadd_scalar_kernel(const dtype* x, const dtype y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] + y; } } void 
Fadd_scalar_impl(const dtype* x, const dtype y, dtype* r, int size) { Fadd_scalar_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, size); } __global__ void Fsquare_kernel(const dtype* x, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index] * x[index]; } } void Fsquare_impl(const dtype* x, dtype* r, int size) { Fsquare_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, r, size); } __global__ void Ftanh_kernel(const dtype* x, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = tanh(x[index]); } } void Ftanh_impl(const dtype* x, dtype* r, int size) { Ftanh_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, r, size); } __global__ void Ftanh_kernel(dtype** x, dtype** r, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int idx = index / dim0; int idy = index % dim0; r[idx][idy] = tanh(x[idx][idy]); } } void Ftanh_impl(dtype** x, dtype** r, int dim0, int size) { Ftanh_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, r, dim0, size); } __global__ void Fsigmoid_kernel(dtype** x, dtype** r, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int idx = index / dim0; int idy = index % dim0; r[idx][idy] = 1.0 / (1.0 + exp(-x[idx][idy])); } } void Fsigmoid_impl(dtype** x, dtype** r, int dim0, int size) { Fsigmoid_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, r, dim0, size); } __global__ void Dsigmoid_kernel(dtype** x, dtype** y, dtype** r, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int idx = index / dim0; int idy = index % dim0; r[idx][idy] = (1 - y[idx][idy]) * y[idx][idy]; } } void Dsigmoid_impl(dtype** x, dtype** y, dtype** r, int dim0, int size){ Dsigmoid_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, dim0, size); } __global__ void Dtanh_kernel(const dtype* x, const dtype* y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = (1 + y[index]) * (1 - y[index]); } } void Dtanh_impl(const dtype* x, const dtype* y, dtype* r, int size) { Dtanh_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, size); } __global__ void Dtanh_kernel(dtype** x, dtype** y, dtype** r, int dim0, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { int idx = index / dim0; int idy = index % dim0; r[idx][idy] = (1 + y[idx][idy]) * (1 - y[idx][idy]); } } void Dtanh_impl(dtype** x, dtype** y, dtype** r, int dim0, int size){ Dtanh_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, dim0, size); } __global__ void Fsigmoid_kernel(const dtype* x, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = 1.0 / (1.0 + exp(-x[index])); } } void Fsigmoid_impl(const dtype* x, dtype* r, int size) { Fsigmoid_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, r, size); } __global__ void Dsigmoid_kernel(const dtype* x, const dtype* y, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = (1 - y[index]) * y[index]; } } void Dsigmoid_impl(const dtype* x, const dtype* y, dtype* r, int size) { 
Dsigmoid_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, r, size); } __global__ void Fsqrt_kernel(const dtype* x, dtype* r, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = sqrt(x[index]); } } void Fsqrt_impl(const dtype* x, dtype* r, int size) { Fsqrt_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, r, size); } __global__ void concat_kernel(const dtype *src, dtype* dst, int offset, int dim) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < dim) { dst[offset + index] = src[index]; } } void concat_impl(const dtype *src, dtype* dst, int offset, int dim) { concat_kernel<<<(dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(src, dst, offset, dim); } __global__ void unconcat_kernel(const dtype *src, dtype* dst, int offset, int dim) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < dim) { dst[index] = src[offset + index]; } } void unconcat_impl(const dtype *src, dtype* dst, int offset, int dim) { unconcat_kernel<<<(dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(src, dst, offset, dim); } __global__ void Ftranspose_kernel(const dtype* x, dtype* r, int dim0, int dim1, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < size) { r[index] = x[index % dim0 * dim1 + index / dim0]; } } void Ftranspose_impl(const dtype* x, dtype* r, int dim0, int dim1, int size) { Ftranspose_kernel<<<(size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, r, dim0, dim1, size); } __global__ void set_col_kernel(dtype* x, int dim0, int col, int size, dtype val) { int index = threadIdx.x + blockIdx.x * blockDim.x; int i = index + col * dim0; if (i < size && index < dim0) { x[i] = val; } } void set_col_impl(dtype* x, int dim0, int col, int size, dtype val) { set_col_kernel<<<(dim0 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, dim0, col, size, val); } __global__ void FLookup_kernel(const dtype* x, dtype** r, int xdim0, int xdim1, int r_size, int* cols, int col_num) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < r_size) { int col_index = index / xdim0; if(col_index < col_num) { int col = cols[col_index]; int offset = index % xdim0; int x_index = col * xdim0 + offset; if(x_index < xdim0 * xdim1) { r[col_index][offset] = x[x_index]; } } } } void FLookup_impl(const dtype* x, dtype** r, int xdim0, int xdim1, int r_size, int* cols, int col_num) { FLookup_kernel<<<(r_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>> (x, r, xdim0, xdim1, r_size, cols, col_num); } __global__ void DLookup_kernel(dtype* gx, dtype** loss, int gxdim0, int gxdim1, int l_size, int* cols, int col_num) { int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < l_size) { int col_index = index / gxdim0; if(col_index < col_num) { int col = cols[col_index]; int offset = index % gxdim0; int gx_index = col * gxdim0 + offset; if(gx_index < gxdim0 * gxdim1) { atomicAdd(gx + gx_index, loss[col_index][offset]); //gx[gx_index] += loss[col_index][offset]; } } } } void DLookup_impl(dtype* gx, dtype** loss, int gxdim0, int gxdim1, int l_size, int* cols, int col_num) { DLookup_kernel<<<(l_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>> (gx, loss, gxdim0, gxdim1, l_size, cols, col_num); } __global__ void get_cols_kernel(const dtype* x, dtype* r, int xdim0, int xdim1, int r_size, int* cols, int col_num) { int index = threadIdx.x + blockIdx.x * blockDim.x; 
if(index < r_size) { int col_index = index / xdim0; if(col_index < col_num) { int col = cols[col_index]; int offset = index % xdim0; int x_index = col * xdim0 + offset; if(x_index < xdim0 * xdim1) { r[index] = x[x_index]; } } } } void get_cols_impl(const dtype* x, dtype* r, int xdim0, int xdim1, int r_size, int* cols, int col_num) { get_cols_kernel<<<(r_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>> (x, r, xdim0, xdim1, r_size, cols, col_num); } __global__ void get_col_kernel(const dtype* x, dtype* r, int dim0, int col, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; int i = index + col * dim0; if (i < size && index < dim0) { r[index] = x[i]; } } void get_col_impl(const dtype* x, dtype* r, int dim0, int col, int size) { get_col_kernel<<<(dim0 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, r, dim0, col, size); } __global__ void Fadd_col_kernel(dtype* x, const dtype* y, int col, int dim0, int size){ int index = threadIdx.x + blockIdx.x * blockDim.x; int i = index + col * dim0; if (i < size && index < dim0) { x[i] = x[i] + y[index]; } } void Fadd_col_impl(dtype* x, const dtype* y, int col, int dim0, int size) { Fadd_col_kernel<<<(dim0 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(x, y, col, dim0, size); } template<int BLOCK_SIZE> __global__ void Favgpooling_kernel( dtype **px, int skip, int n, dtype *py) { __shared__ dtype temp[BLOCK_SIZE]; const int bid = blockIdx.x; const int tid = threadIdx.x; //px += bid % skip + (bid / skip) * skip * n; int index_start = bid % skip + (bid / skip) * skip * n; temp[tid] = 0; for (int i = tid; i < n; i += BLOCK_SIZE) { int global = index_start + i * skip; int idx = global / skip; int idy = global % skip; temp[tid] += px[idx][idy]; } ::__syncthreads(); #define REDUCE(k) \ if (BLOCK_SIZE >= k << 1) { \ if (tid < k) temp[tid] += temp[tid + k]; \ ::__syncthreads(); \ } REDUCE(512) REDUCE(256) REDUCE(128) REDUCE(64) REDUCE(32) REDUCE(16) REDUCE(8) REDUCE(4) REDUCE(2) REDUCE(1) #undef REDUCE if (tid == 0) py[bid] = temp[0] / n; } void Favgpooling_impl(dtype** x, dtype* y, int n, int r, int s) { int block_size = THREADS_PER_BLOCK; while (block_size >> 1 >= n) block_size >>= 1; switch (block_size) { #define CASE(k) \ case k: ::Favgpooling_kernel<k><<<r, k>>>(x, s, n, y); break CASE(1024); CASE(512); CASE(256); CASE(128); CASE(64); CASE(32); CASE(16); CASE(8); CASE(4); CASE(2); CASE(1); #undef CASE } } __global__ void Davgpooling_kernel(const dtype* gy, int gy_size, int gx_size, int n, dtype** gx) { int i = threadIdx.x + blockIdx.x * blockDim.x; if(i < gx_size) { int idx = i / gy_size; int idy = i % gy_size; atomicAdd(gx[idx] + idy, gy[idy] / n); //gx[idx][idy] += (gy[idy] / n); } } void Davgpooling_impl(const dtype* gy, int gy_size, int gx_size, int n, dtype** gx) { Davgpooling_kernel<<<(gx_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(gy, gy_size, gx_size, n, gx); } template<int BLOCK_SIZE> __global__ void Fsumpooling_kernel( dtype **px, int skip, int n, dtype *py) { __shared__ dtype temp[BLOCK_SIZE]; const int bid = blockIdx.x; const int tid = threadIdx.x; //px += bid % skip + (bid / skip) * skip * n; int index_start = bid % skip + (bid / skip) * skip * n; temp[tid] = 0; for (int i = tid; i < n; i += BLOCK_SIZE) { int global = index_start + i * skip; int idx = global / skip; int idy = global % skip; dtype val = px[idx][idy]; temp[tid] += val; } ::__syncthreads(); #define REDUCE(k) \ if (BLOCK_SIZE >= k << 1) { \ if (tid < k) temp[tid] += temp[tid + k]; \ 
::__syncthreads(); \ } REDUCE(512) REDUCE(256) REDUCE(128) REDUCE(64) REDUCE(32) REDUCE(16) REDUCE(8) REDUCE(4) REDUCE(2) REDUCE(1) #undef REDUCE if (tid == 0) py[bid] = temp[0]; } void Fsumpooling_impl(dtype** x, dtype* y, int n, int r, int s) { int block_size = THREADS_PER_BLOCK; while (block_size >> 1 >= n) block_size >>= 1; switch (block_size) { #define CASE(k) \ case k: ::Fsumpooling_kernel<k><<<r, k>>>(x, s, n, y); break CASE(1024); CASE(512); CASE(256); CASE(128); CASE(64); CASE(32); CASE(16); CASE(8); CASE(4); CASE(2); CASE(1); #undef CASE } } __global__ void Dsumpooling_kernel(const dtype* gy, int gy_size, int gx_size, dtype** gx) { int i = threadIdx.x + blockIdx.x * blockDim.x; if(i < gx_size) { int idx = i / gy_size; int idy = i % gy_size; atomicAdd(gx[idx] + idy, gy[idy]); //gx[idx][idy] += gy[idy]; } } void Dsumpooling_impl(const dtype* gy, int gy_size, int gx_size, dtype** gx) { Dsumpooling_kernel<<<(gx_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(gy, gy_size, gx_size, gx); } template<int BLOCK_SIZE> __global__ void Fmaxpooling_kernel( dtype **px, int skip, int n, dtype *py, int* index) { __shared__ dtype temp[BLOCK_SIZE]; __shared__ int temp_index[BLOCK_SIZE]; const int bid = blockIdx.x; const int tid = threadIdx.x; //px += bid % skip + (bid / skip) * skip * n; dtype thread_max = NEGATIVE_INFINITY; int index_start = bid % skip + (bid / skip) * skip * n; int index_max; for (int i = tid; i < n; i += BLOCK_SIZE) { int global = index_start + i * skip; int idx = global / skip; int idy = global % skip; dtype val = px[idx][idy]; if(val > thread_max) { thread_max = val; index_max = index_start + i * skip; } } temp[tid] = thread_max; temp_index[tid] = index_max; ::__syncthreads(); #define REDUCE(k) \ if (BLOCK_SIZE >= k << 1) { \ if (tid < k) if(temp[tid + k] > temp[tid]) {temp[tid] = temp[tid + k]; temp_index[tid] = temp_index[tid + k];} \ ::__syncthreads(); \ } REDUCE(512) REDUCE(256) REDUCE(128) REDUCE(64) REDUCE(32) REDUCE(16) REDUCE(8) REDUCE(4) REDUCE(2) REDUCE(1) #undef REDUCE if (tid == 0) {py[bid] = temp[0]; index[bid] = temp_index[0];} } void Fmaxpooling_impl(dtype** x, dtype* y, int n, int r, int s, int* index){ int block_size = THREADS_PER_BLOCK; while (block_size >> 1 >= n) block_size >>= 1; switch (block_size) { #define CASE(k) \ case k: ::Fmaxpooling_kernel<k><<<r, k>>>(x, s, n, y, index); break CASE(1024); CASE(512); CASE(256); CASE(128); CASE(64); CASE(32); CASE(16); CASE(8); CASE(4); CASE(2); CASE(1); #undef CASE } } __global__ void Dmaxpooling_kernel(const dtype* gy, dtype** gx, int* index, int dim) { int i = threadIdx.x + blockIdx.x * blockDim.x; if(i < dim) { int idx = index[i] / dim; int idy = index[i] % dim; atomicAdd(gx[idx] + idy, gy[i]); //gx[idx][idy] += gy[i]; } } void Dmaxpooling_impl(const dtype* gy, dtype** gx, int* index, int dim) { Dmaxpooling_kernel<<<(dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(gy, gx, index, dim); } template<int BLOCK_SIZE> __global__ void Fminpooling_kernel( dtype **px, int skip, int n, dtype *py, int* index) { __shared__ dtype temp[BLOCK_SIZE]; __shared__ int temp_index[BLOCK_SIZE]; const int bid = blockIdx.x; const int tid = threadIdx.x; //px += bid % skip + (bid / skip) * skip * n; dtype thread_min = POSITIVE_INFINITY; int index_start = bid % skip + (bid / skip) * skip * n; int index_min; for (int i = tid; i < n; i += BLOCK_SIZE) { int global = index_start + i * skip; int idx = global / skip; int idy = global % skip; dtype val = px[idx][idy]; if(val < thread_min) { 
thread_min = val; index_min = index_start + i * skip; } } temp[tid] = thread_min; temp_index[tid] = index_min; ::__syncthreads(); #define REDUCE(k) \ if (BLOCK_SIZE >= k << 1) { \ if (tid < k) if(temp[tid + k] < temp[tid]) {temp[tid] = temp[tid + k]; temp_index[tid] = temp_index[tid + k];} \ ::__syncthreads(); \ } REDUCE(512) REDUCE(256) REDUCE(128) REDUCE(64) REDUCE(32) REDUCE(16) REDUCE(8) REDUCE(4) REDUCE(2) REDUCE(1) #undef REDUCE if (tid == 0) {py[bid] = temp[0]; index[bid] = temp_index[0];} } void Fminpooling_impl(dtype** x, dtype* y, int n, int r, int s, int* index) { int block_size = THREADS_PER_BLOCK; while (block_size >> 1 >= n) block_size >>= 1; switch (block_size) { #define CASE(k) \ case k: ::Fminpooling_kernel<k><<<r, k>>>(x, s, n, y, index); break CASE(1024); CASE(512); CASE(256); CASE(128); CASE(64); CASE(32); CASE(16); CASE(8); CASE(4); CASE(2); CASE(1); #undef CASE } } __global__ void Dminpooling_kernel(const dtype* gy, dtype** gx, int* index, int dim) { int i = threadIdx.x + blockIdx.x * blockDim.x; if(i < dim) { int idx = index[i] / dim; int idy = index[i] % dim; atomicAdd(gx[idx] + idy, gy[i]); //gx[idx][idy] += gy[i]; } } void Dminpooling_impl(const dtype* gy, dtype** gx, int* index, int dim) { Dminpooling_kernel<<<(dim + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(gy, gx, index, dim); }
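// ---------------------------------------------------------------------------
// Illustrative sketch (added for clarity; not part of the original file
// above). The avg/sum/max/min pooling kernels all share one pattern: each
// block strides over n values, accumulates into a BLOCK_SIZE-wide shared
// buffer, then folds the buffer in half repeatedly (the REDUCE(k) macro)
// until temp[0] holds the block's result. The minimal standalone kernel
// below shows that reduction for a plain contiguous float array; the name
// block_sum_kernel and the example launch are made up for illustration, and
// BLOCK_SIZE must be a power of two, as in the kernels above.
// ---------------------------------------------------------------------------
template<int BLOCK_SIZE>
__global__ void block_sum_kernel(const float *x, int n, float *out) {
    __shared__ float temp[BLOCK_SIZE];
    const int tid = threadIdx.x;
    float acc = 0.f;
    for (int i = tid; i < n; i += BLOCK_SIZE) {   // each thread strides over the input
        acc += x[i];
    }
    temp[tid] = acc;
    __syncthreads();
    // Same halving scheme the REDUCE(k) macro expands to.
    for (int k = BLOCK_SIZE / 2; k > 0; k >>= 1) {
        if (tid < k) temp[tid] += temp[tid + k];
        __syncthreads();
    }
    if (tid == 0) out[blockIdx.x] = temp[0];
}
// Example launch: block_sum_kernel<256><<<1, 256>>>(d_x, n, d_sum);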
unaryops.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * * Code edits and additions * Copyright 2018 Rommel Quintanilla <[email protected]> */ #include <cmath> #include <algorithm> #include <gdf/gdf.h> #include <gdf/utils.h> #include <gdf/errorutils.h> #include <thrust/copy.h> #include <thrust/execution_policy.h> #include "thrust_rmm_allocator.h" template<typename T, typename Tout, typename F> __global__ void gpu_unary_op(const T *data, const gdf_valid_type *valid, gdf_size_type size, Tout *results, F functor) { int tid = threadIdx.x; int blkid = blockIdx.x; int blksz = blockDim.x; int gridsz = gridDim.x; int start = tid + blkid * blksz; int step = blksz * gridsz; if ( valid ) { // has valid mask for (int i=start; i<size; i+=step) { if ( gdf_is_valid(valid, i) ) results[i] = functor.apply(data[i]); } } else { // no valid mask for (int i=start; i<size; i+=step) { results[i] = functor.apply(data[i]); } } } template<typename T, typename Tout, typename F> struct UnaryOp { static gdf_error launch(gdf_column *input, gdf_column *output) { // Return immediately for empty inputs if((0==input->size)) { return GDF_SUCCESS; } /* check for size of the columns */ if (input->size != output->size) { return GDF_COLUMN_SIZE_MISMATCH; } // find optimal blocksize int mingridsize, blocksize; CUDA_TRY( hipOccupancyMaxPotentialBlockSize(&mingridsize, &blocksize, gpu_unary_op<T, Tout, F>) ); // find needed gridsize int neededgridsize = (input->size + blocksize - 1) / blocksize; int gridsize = ::min(neededgridsize, mingridsize); F functor; hipLaunchKernelGGL(( gpu_unary_op), dim3(gridsize), dim3(blocksize), 0, 0, // input (const T*)input->data, input->valid, input->size, // output (Tout*)output->data, // action functor ); CUDA_CHECK_LAST(); return GDF_SUCCESS; } }; template<typename T, typename F> struct MathOp { static gdf_error launch(gdf_column *input, gdf_column *output) { return UnaryOp<T, T, F>::launch(input, output); } }; #define DEF_UNARY_OP_REAL(F) \ gdf_error F##_generic(gdf_column *input, gdf_column *output) { \ switch ( input->dtype ) { \ case GDF_FLOAT32: return F##_f32(input, output); \ case GDF_FLOAT64: return F##_f64(input, output); \ default: return GDF_UNSUPPORTED_DTYPE; \ } \ } #define DEF_CAST_OP(TO) \ gdf_error gdf_cast_generic_to_##TO(gdf_column *input, gdf_column *output) { \ switch ( input->dtype ) { \ case GDF_INT8: return gdf_cast_i8_to_##TO(input, output); \ case GDF_INT32: return gdf_cast_i32_to_##TO(input, output); \ case GDF_INT64: return gdf_cast_i64_to_##TO(input, output); \ case GDF_FLOAT32: return gdf_cast_f32_to_##TO(input, output); \ case GDF_FLOAT64: return gdf_cast_f64_to_##TO(input, output); \ case GDF_DATE32: return gdf_cast_date32_to_##TO(input, output); \ case GDF_DATE64: return gdf_cast_date64_to_##TO(input, output); \ case GDF_TIMESTAMP: return gdf_cast_timestamp_to_##TO(input, output); \ default: return GDF_UNSUPPORTED_DTYPE; \ } \ } #define DEF_CAST_OP_TS(TO) \ gdf_error gdf_cast_generic_to_##TO(gdf_column *input, gdf_column *output, gdf_time_unit time_unit) {\ switch ( input->dtype ) { \ case GDF_INT8: return gdf_cast_i8_to_##TO(input, output, time_unit); \ case GDF_INT32: return gdf_cast_i32_to_##TO(input, output, time_unit); \ case GDF_INT64: return gdf_cast_i64_to_##TO(input, output, time_unit); \ case GDF_FLOAT32: return gdf_cast_f32_to_##TO(input, output, time_unit); \ case GDF_FLOAT64: return gdf_cast_f64_to_##TO(input, output, time_unit); \ case GDF_DATE32: return gdf_cast_date32_to_##TO(input, output, time_unit); \ case 
GDF_DATE64: return gdf_cast_date64_to_##TO(input, output, time_unit); \ case GDF_TIMESTAMP: return gdf_cast_timestamp_to_##TO(input, output, time_unit); \ default: return GDF_UNSUPPORTED_DTYPE; \ } \ } // trig functions template<typename T> struct DeviceSin { __device__ T apply(T data) { return std::sin(data); } }; template<typename T> struct DeviceCos { __device__ T apply(T data) { return std::cos(data); } }; template<typename T> struct DeviceTan { __device__ T apply(T data) { return std::tan(data); } }; template<typename T> struct DeviceArcSin { __device__ T apply(T data) { return std::asin(data); } }; template<typename T> struct DeviceArcCos { __device__ T apply(T data) { return std::acos(data); } }; template<typename T> struct DeviceArcTan { __device__ T apply(T data) { return std::atan(data); } }; DEF_UNARY_OP_REAL(gdf_sin) gdf_error gdf_sin_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceSin<float> >::launch(input, output); } gdf_error gdf_sin_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceSin<double> >::launch(input, output); } DEF_UNARY_OP_REAL(gdf_cos) gdf_error gdf_cos_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceCos<float> >::launch(input, output); } gdf_error gdf_cos_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceCos<double> >::launch(input, output); } DEF_UNARY_OP_REAL(gdf_tan) gdf_error gdf_tan_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceTan<float> >::launch(input, output); } gdf_error gdf_tan_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceTan<double> >::launch(input, output); } DEF_UNARY_OP_REAL(gdf_asin) gdf_error gdf_asin_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceArcSin<float> >::launch(input, output); } gdf_error gdf_asin_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceArcSin<double> >::launch(input, output); } DEF_UNARY_OP_REAL(gdf_acos) gdf_error gdf_acos_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceArcCos<float> >::launch(input, output); } gdf_error gdf_acos_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceArcCos<double> >::launch(input, output); } DEF_UNARY_OP_REAL(gdf_atan) gdf_error gdf_atan_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceArcTan<float> >::launch(input, output); } gdf_error gdf_atan_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceArcTan<double> >::launch(input, output); } // exponential functions template<typename T> struct DeviceExp { __device__ T apply(T data) { return ::exp(data); } }; template<typename T> struct DeviceLog { __device__ T apply(T data) { return ::log(data); } }; DEF_UNARY_OP_REAL(gdf_exp) gdf_error gdf_exp_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceExp<float> >::launch(input, output); } gdf_error gdf_exp_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceExp<double> >::launch(input, output); } DEF_UNARY_OP_REAL(gdf_log) gdf_error gdf_log_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceLog<float> >::launch(input, output); } gdf_error gdf_log_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceLog<double> >::launch(input, output); } // exponential functions template<typename T> struct DeviceSqrt { __device__ T apply(T data) { return std::sqrt(data); } }; DEF_UNARY_OP_REAL(gdf_sqrt) gdf_error gdf_sqrt_f32(gdf_column *input, gdf_column 
*output) { return MathOp<float, DeviceSqrt<float> >::launch(input, output); } gdf_error gdf_sqrt_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceSqrt<double> >::launch(input, output); } // rounding functions template<typename T> struct DeviceCeil { __device__ T apply(T data) { return ::ceil(data); } }; template<typename T> struct DeviceFloor { __device__ T apply(T data) { return ::floor(data); } }; DEF_UNARY_OP_REAL(gdf_ceil) gdf_error gdf_ceil_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceCeil<float> >::launch(input, output); } gdf_error gdf_ceil_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceCeil<double> >::launch(input, output); } DEF_UNARY_OP_REAL(gdf_floor) gdf_error gdf_floor_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceFloor<float> >::launch(input, output); } gdf_error gdf_floor_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceFloor<double> >::launch(input, output); } // casting template<typename From, typename To> struct DeviceCast { __device__ To apply(From data) { return (To)data; } }; template<typename From, typename To, int64_t units_factor> struct UpCasting { __device__ To apply(From data) { return (To)(data*units_factor); } }; template<typename From, typename To, int64_t units_factor> struct DownCasting { __device__ To apply(From data) { return (To)((data-(units_factor-1)*(data<0))/units_factor); //ceiling only when data is negative } }; // Castings are differentiate between physical and logical ones. // In physical casting only change the physical representation, for example from GDF_FLOAT32 (float) to GDF_FLOAT64 (double) // on the other hand, casting between date timestamps needs also perform some calculations according to the time unit: // - when the source or destination datatype is GDF_DATE32, the value is multiplied or divided by the amount of timeunits by day // - when datatypes are timestamps, the value is multiplied or divided according to the S.I. 
nano 10^-9, micro 10^-6, milli 10^-3 // No calculation is necessary when casting between GDF_DATE64 and GDF_TIMESTAMP (with ms as time unit), because are logically and physically the same thing #define DEF_CAST_IMPL(VFROM, VTO, TFROM, TTO, LTFROM, LTO) \ gdf_error gdf_cast_##VFROM##_to_##VTO(gdf_column *input, gdf_column *output) { \ GDF_REQUIRE(input->dtype == LTFROM, GDF_UNSUPPORTED_DTYPE); \ \ hipStream_t stream = 0; \ rmm_temp_allocator allocator(stream); \ \ output->dtype = LTO; \ if (input->valid && output->valid) { \ gdf_size_type num_chars_bitmask = gdf_get_num_chars_bitmask(input->size); \ thrust::copy(thrust::hip::par(allocator).on(stream), input->valid, input->valid + num_chars_bitmask, output->valid); \ } \ \ /* Handling datetime logical castings */ \ if( LTFROM == GDF_DATE64 && LTO == GDF_DATE32 ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 86400000> >::launch(input, output); \ else if( LTFROM == GDF_DATE32 && LTO == GDF_DATE64 ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 86400000> >::launch(input, output); \ else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_s ) && LTO == GDF_DATE32 ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 86400> >::launch(input, output); \ else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_ms ) && LTO == GDF_DATE32 ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 86400000> >::launch(input, output); \ else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_us ) && LTO == GDF_DATE32 ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 86400000000> >::launch(input, output); \ else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_ns ) && LTO == GDF_DATE32 ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 86400000000000> >::launch(input, output); \ else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_s ) && LTO == GDF_DATE64 ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000> >::launch(input, output); \ else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_us ) && LTO == GDF_DATE64 ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000> >::launch(input, output); \ else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_ns ) && LTO == GDF_DATE64 ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000000> >::launch(input, output); \ /* Handling only physical castings */ \ return UnaryOp<TFROM, TTO, DeviceCast<TFROM, TTO> >::launch(input, output); \ } // Castings functions where Timestamp is the destination type #define DEF_CAST_IMPL_TS(VFROM, VTO, TFROM, TTO, LTFROM, LTO) \ gdf_error gdf_cast_##VFROM##_to_##VTO(gdf_column *input, gdf_column *output, gdf_time_unit time_unit) { \ GDF_REQUIRE(input->dtype == LTFROM, GDF_UNSUPPORTED_DTYPE); \ \ hipStream_t stream = 0; \ rmm_temp_allocator allocator(stream); \ \ output->dtype = LTO; \ output->dtype_info.time_unit = time_unit; \ if (input->valid && output->valid) { \ gdf_size_type num_chars_bitmask = gdf_get_num_chars_bitmask(input->size); \ thrust::copy(thrust::hip::par(allocator).on(stream), input->valid, input->valid + num_chars_bitmask, output->valid); \ } \ \ /* Handling datetime logical castings */ \ if( LTFROM == GDF_DATE32 && ( LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_s ) ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 86400> >::launch(input, output); \ else if( LTFROM == GDF_DATE32 && ( LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_ms ) ) \ return UnaryOp<TFROM, TTO, 
UpCasting<TFROM, TTO, 86400000> >::launch(input, output); \ else if( LTFROM == GDF_DATE32 && ( LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_us ) ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 86400000000> >::launch(input, output); \ else if( LTFROM == GDF_DATE32 && ( LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_ns ) ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 86400000000000> >::launch(input, output); \ else if( LTFROM == GDF_DATE64 && LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_us) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000> >::launch(input, output); \ else if( LTFROM == GDF_DATE64 && LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_s) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000> >::launch(input, output); \ else if( LTFROM == GDF_DATE64 && LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_ns) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000000> >::launch(input, output); \ else if( LTFROM == GDF_TIMESTAMP && LTO == GDF_TIMESTAMP ) \ { \ if( input->dtype_info.time_unit == TIME_UNIT_s && time_unit == TIME_UNIT_ms ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_ms && time_unit == TIME_UNIT_s ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_s && time_unit == TIME_UNIT_us ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_us && time_unit == TIME_UNIT_s ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_s && time_unit == TIME_UNIT_ns ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000000000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_ns && time_unit == TIME_UNIT_s ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000000000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_us && time_unit == TIME_UNIT_ns ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_ns && time_unit == TIME_UNIT_us ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_ms && time_unit == TIME_UNIT_ns ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_ns && time_unit == TIME_UNIT_ms ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_us && time_unit == TIME_UNIT_ms ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_ms && time_unit == TIME_UNIT_us ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000> >::launch(input, output); \ } \ /* Handling only physical castings */ \ return UnaryOp<TFROM, TTO, DeviceCast<TFROM, TTO> >::launch(input, output); \ } #define DEF_CAST_IMPL_TEMPLATE(ABREV, PHYSICAL_TYPE, LOGICAL_TYPE) \ DEF_CAST_OP(ABREV) \ DEF_CAST_IMPL(i8, ABREV, int8_t, PHYSICAL_TYPE, GDF_INT8, LOGICAL_TYPE) \ DEF_CAST_IMPL(i32, ABREV, int32_t, PHYSICAL_TYPE, GDF_INT32, LOGICAL_TYPE) \ DEF_CAST_IMPL(i64, ABREV, int64_t, PHYSICAL_TYPE, GDF_INT64, LOGICAL_TYPE) \ DEF_CAST_IMPL(f32, ABREV, float, PHYSICAL_TYPE, 
GDF_FLOAT32, LOGICAL_TYPE) \ DEF_CAST_IMPL(f64, ABREV, double, PHYSICAL_TYPE, GDF_FLOAT64, LOGICAL_TYPE) \ DEF_CAST_IMPL(date32, ABREV, int32_t, PHYSICAL_TYPE, GDF_DATE32, LOGICAL_TYPE) \ DEF_CAST_IMPL(date64, ABREV, int64_t, PHYSICAL_TYPE, GDF_DATE64, LOGICAL_TYPE) \ DEF_CAST_IMPL(timestamp, ABREV, int64_t, PHYSICAL_TYPE, GDF_TIMESTAMP, LOGICAL_TYPE) #define DEF_CAST_IMPL_TEMPLATE_TS(ABREV, PHYSICAL_TYPE, LOGICAL_TYPE) \ DEF_CAST_OP_TS(ABREV) \ DEF_CAST_IMPL_TS(i8, ABREV, int8_t, PHYSICAL_TYPE, GDF_INT8, LOGICAL_TYPE) \ DEF_CAST_IMPL_TS(i32, ABREV, int32_t, PHYSICAL_TYPE, GDF_INT32, LOGICAL_TYPE) \ DEF_CAST_IMPL_TS(i64, ABREV, int64_t, PHYSICAL_TYPE, GDF_INT64, LOGICAL_TYPE) \ DEF_CAST_IMPL_TS(f32, ABREV, float, PHYSICAL_TYPE, GDF_FLOAT32, LOGICAL_TYPE) \ DEF_CAST_IMPL_TS(f64, ABREV, double, PHYSICAL_TYPE, GDF_FLOAT64, LOGICAL_TYPE) \ DEF_CAST_IMPL_TS(date32, ABREV, int32_t, PHYSICAL_TYPE, GDF_DATE32, LOGICAL_TYPE) \ DEF_CAST_IMPL_TS(date64, ABREV, int64_t, PHYSICAL_TYPE, GDF_DATE64, LOGICAL_TYPE) \ DEF_CAST_IMPL_TS(timestamp, ABREV, int64_t, PHYSICAL_TYPE, GDF_TIMESTAMP, LOGICAL_TYPE) DEF_CAST_IMPL_TEMPLATE(f32, float, GDF_FLOAT32) DEF_CAST_IMPL_TEMPLATE(f64, double, GDF_FLOAT64) DEF_CAST_IMPL_TEMPLATE(i8, int8_t, GDF_INT8) DEF_CAST_IMPL_TEMPLATE(i32, int32_t, GDF_INT32) DEF_CAST_IMPL_TEMPLATE(i64, int64_t, GDF_INT64) DEF_CAST_IMPL_TEMPLATE(date32, int32_t, GDF_DATE32) DEF_CAST_IMPL_TEMPLATE(date64, int64_t, GDF_DATE64) DEF_CAST_IMPL_TEMPLATE_TS(timestamp, int64_t, GDF_TIMESTAMP)
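// ---------------------------------------------------------------------------
// Illustrative sketch (added for clarity; not part of the original unaryops
// file above). The cast macros pick an UpCasting/DownCasting scale factor
// from the time units involved: GDF_DATE32 counts days while GDF_DATE64 and
// timestamp[ms] count milliseconds, hence the factor 86400 * 1000 = 86400000.
// DownCasting's expression (data - (units_factor - 1) * (data < 0)) /
// units_factor adjusts plain integer division so that negative values round
// down toward the earlier unit instead of toward zero. The host-only check
// below reproduces that arithmetic with plain int64_t; the helper names are
// illustrative only.
// ---------------------------------------------------------------------------
#include <cstdint>
#include <cstdio>

static int64_t up_cast(int64_t data, int64_t units_factor) {
    return data * units_factor;                                       // e.g. days -> ms
}
static int64_t down_cast(int64_t data, int64_t units_factor) {
    return (data - (units_factor - 1) * (data < 0)) / units_factor;   // e.g. ms -> days, flooring
}

int main() {
    const int64_t ms_per_day = 86400000;                              // 86400 s/day * 1000 ms/s
    printf("%lld\n", (long long)up_cast(1, ms_per_day));              // day 1 -> 86400000 ms
    printf("%lld\n", (long long)down_cast(86399999, ms_per_day));     // last ms of day 0 -> 0
    printf("%lld\n", (long long)down_cast(-1, ms_per_day));           // 1 ms before epoch -> day -1
    return 0;
}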
unaryops.cu
/* * * Code edits and additions * Copyright 2018 Rommel Quintanilla <[email protected]> */ #include <cmath> #include <algorithm> #include <gdf/gdf.h> #include <gdf/utils.h> #include <gdf/errorutils.h> #include <thrust/copy.h> #include <thrust/execution_policy.h> #include "thrust_rmm_allocator.h" template<typename T, typename Tout, typename F> __global__ void gpu_unary_op(const T *data, const gdf_valid_type *valid, gdf_size_type size, Tout *results, F functor) { int tid = threadIdx.x; int blkid = blockIdx.x; int blksz = blockDim.x; int gridsz = gridDim.x; int start = tid + blkid * blksz; int step = blksz * gridsz; if ( valid ) { // has valid mask for (int i=start; i<size; i+=step) { if ( gdf_is_valid(valid, i) ) results[i] = functor.apply(data[i]); } } else { // no valid mask for (int i=start; i<size; i+=step) { results[i] = functor.apply(data[i]); } } } template<typename T, typename Tout, typename F> struct UnaryOp { static gdf_error launch(gdf_column *input, gdf_column *output) { // Return immediately for empty inputs if((0==input->size)) { return GDF_SUCCESS; } /* check for size of the columns */ if (input->size != output->size) { return GDF_COLUMN_SIZE_MISMATCH; } // find optimal blocksize int mingridsize, blocksize; CUDA_TRY( cudaOccupancyMaxPotentialBlockSize(&mingridsize, &blocksize, gpu_unary_op<T, Tout, F>) ); // find needed gridsize int neededgridsize = (input->size + blocksize - 1) / blocksize; int gridsize = std::min(neededgridsize, mingridsize); F functor; gpu_unary_op<<<gridsize, blocksize>>>( // input (const T*)input->data, input->valid, input->size, // output (Tout*)output->data, // action functor ); CUDA_CHECK_LAST(); return GDF_SUCCESS; } }; template<typename T, typename F> struct MathOp { static gdf_error launch(gdf_column *input, gdf_column *output) { return UnaryOp<T, T, F>::launch(input, output); } }; #define DEF_UNARY_OP_REAL(F) \ gdf_error F##_generic(gdf_column *input, gdf_column *output) { \ switch ( input->dtype ) { \ case GDF_FLOAT32: return F##_f32(input, output); \ case GDF_FLOAT64: return F##_f64(input, output); \ default: return GDF_UNSUPPORTED_DTYPE; \ } \ } #define DEF_CAST_OP(TO) \ gdf_error gdf_cast_generic_to_##TO(gdf_column *input, gdf_column *output) { \ switch ( input->dtype ) { \ case GDF_INT8: return gdf_cast_i8_to_##TO(input, output); \ case GDF_INT32: return gdf_cast_i32_to_##TO(input, output); \ case GDF_INT64: return gdf_cast_i64_to_##TO(input, output); \ case GDF_FLOAT32: return gdf_cast_f32_to_##TO(input, output); \ case GDF_FLOAT64: return gdf_cast_f64_to_##TO(input, output); \ case GDF_DATE32: return gdf_cast_date32_to_##TO(input, output); \ case GDF_DATE64: return gdf_cast_date64_to_##TO(input, output); \ case GDF_TIMESTAMP: return gdf_cast_timestamp_to_##TO(input, output); \ default: return GDF_UNSUPPORTED_DTYPE; \ } \ } #define DEF_CAST_OP_TS(TO) \ gdf_error gdf_cast_generic_to_##TO(gdf_column *input, gdf_column *output, gdf_time_unit time_unit) {\ switch ( input->dtype ) { \ case GDF_INT8: return gdf_cast_i8_to_##TO(input, output, time_unit); \ case GDF_INT32: return gdf_cast_i32_to_##TO(input, output, time_unit); \ case GDF_INT64: return gdf_cast_i64_to_##TO(input, output, time_unit); \ case GDF_FLOAT32: return gdf_cast_f32_to_##TO(input, output, time_unit); \ case GDF_FLOAT64: return gdf_cast_f64_to_##TO(input, output, time_unit); \ case GDF_DATE32: return gdf_cast_date32_to_##TO(input, output, time_unit); \ case GDF_DATE64: return gdf_cast_date64_to_##TO(input, output, time_unit); \ case GDF_TIMESTAMP: return 
gdf_cast_timestamp_to_##TO(input, output, time_unit); \ default: return GDF_UNSUPPORTED_DTYPE; \ } \ } // trig functions template<typename T> struct DeviceSin { __device__ T apply(T data) { return std::sin(data); } }; template<typename T> struct DeviceCos { __device__ T apply(T data) { return std::cos(data); } }; template<typename T> struct DeviceTan { __device__ T apply(T data) { return std::tan(data); } }; template<typename T> struct DeviceArcSin { __device__ T apply(T data) { return std::asin(data); } }; template<typename T> struct DeviceArcCos { __device__ T apply(T data) { return std::acos(data); } }; template<typename T> struct DeviceArcTan { __device__ T apply(T data) { return std::atan(data); } }; DEF_UNARY_OP_REAL(gdf_sin) gdf_error gdf_sin_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceSin<float> >::launch(input, output); } gdf_error gdf_sin_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceSin<double> >::launch(input, output); } DEF_UNARY_OP_REAL(gdf_cos) gdf_error gdf_cos_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceCos<float> >::launch(input, output); } gdf_error gdf_cos_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceCos<double> >::launch(input, output); } DEF_UNARY_OP_REAL(gdf_tan) gdf_error gdf_tan_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceTan<float> >::launch(input, output); } gdf_error gdf_tan_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceTan<double> >::launch(input, output); } DEF_UNARY_OP_REAL(gdf_asin) gdf_error gdf_asin_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceArcSin<float> >::launch(input, output); } gdf_error gdf_asin_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceArcSin<double> >::launch(input, output); } DEF_UNARY_OP_REAL(gdf_acos) gdf_error gdf_acos_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceArcCos<float> >::launch(input, output); } gdf_error gdf_acos_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceArcCos<double> >::launch(input, output); } DEF_UNARY_OP_REAL(gdf_atan) gdf_error gdf_atan_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceArcTan<float> >::launch(input, output); } gdf_error gdf_atan_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceArcTan<double> >::launch(input, output); } // exponential functions template<typename T> struct DeviceExp { __device__ T apply(T data) { return std::exp(data); } }; template<typename T> struct DeviceLog { __device__ T apply(T data) { return std::log(data); } }; DEF_UNARY_OP_REAL(gdf_exp) gdf_error gdf_exp_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceExp<float> >::launch(input, output); } gdf_error gdf_exp_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceExp<double> >::launch(input, output); } DEF_UNARY_OP_REAL(gdf_log) gdf_error gdf_log_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceLog<float> >::launch(input, output); } gdf_error gdf_log_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceLog<double> >::launch(input, output); } // exponential functions template<typename T> struct DeviceSqrt { __device__ T apply(T data) { return std::sqrt(data); } }; DEF_UNARY_OP_REAL(gdf_sqrt) gdf_error gdf_sqrt_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceSqrt<float> >::launch(input, output); } gdf_error 
gdf_sqrt_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceSqrt<double> >::launch(input, output); } // rounding functions template<typename T> struct DeviceCeil { __device__ T apply(T data) { return std::ceil(data); } }; template<typename T> struct DeviceFloor { __device__ T apply(T data) { return std::floor(data); } }; DEF_UNARY_OP_REAL(gdf_ceil) gdf_error gdf_ceil_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceCeil<float> >::launch(input, output); } gdf_error gdf_ceil_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceCeil<double> >::launch(input, output); } DEF_UNARY_OP_REAL(gdf_floor) gdf_error gdf_floor_f32(gdf_column *input, gdf_column *output) { return MathOp<float, DeviceFloor<float> >::launch(input, output); } gdf_error gdf_floor_f64(gdf_column *input, gdf_column *output) { return MathOp<double, DeviceFloor<double> >::launch(input, output); } // casting template<typename From, typename To> struct DeviceCast { __device__ To apply(From data) { return (To)data; } }; template<typename From, typename To, int64_t units_factor> struct UpCasting { __device__ To apply(From data) { return (To)(data*units_factor); } }; template<typename From, typename To, int64_t units_factor> struct DownCasting { __device__ To apply(From data) { return (To)((data-(units_factor-1)*(data<0))/units_factor); //adjust negative values so the division rounds toward negative infinity } }; // Casts are differentiated into physical and logical ones. // A physical cast only changes the physical representation, for example from GDF_FLOAT32 (float) to GDF_FLOAT64 (double). // On the other hand, casting between date/timestamp types also has to perform some calculation according to the time unit: // - when the source or destination datatype is GDF_DATE32, the value is multiplied or divided by the number of time units per day // - when the datatypes are timestamps, the value is multiplied or divided according to the S.I. prefixes
nano 10^-9, micro 10^-6, milli 10^-3 // No calculation is necessary when casting between GDF_DATE64 and GDF_TIMESTAMP (with ms as time unit), because are logically and physically the same thing #define DEF_CAST_IMPL(VFROM, VTO, TFROM, TTO, LTFROM, LTO) \ gdf_error gdf_cast_##VFROM##_to_##VTO(gdf_column *input, gdf_column *output) { \ GDF_REQUIRE(input->dtype == LTFROM, GDF_UNSUPPORTED_DTYPE); \ \ cudaStream_t stream = 0; \ rmm_temp_allocator allocator(stream); \ \ output->dtype = LTO; \ if (input->valid && output->valid) { \ gdf_size_type num_chars_bitmask = gdf_get_num_chars_bitmask(input->size); \ thrust::copy(thrust::cuda::par(allocator).on(stream), input->valid, input->valid + num_chars_bitmask, output->valid); \ } \ \ /* Handling datetime logical castings */ \ if( LTFROM == GDF_DATE64 && LTO == GDF_DATE32 ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 86400000> >::launch(input, output); \ else if( LTFROM == GDF_DATE32 && LTO == GDF_DATE64 ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 86400000> >::launch(input, output); \ else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_s ) && LTO == GDF_DATE32 ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 86400> >::launch(input, output); \ else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_ms ) && LTO == GDF_DATE32 ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 86400000> >::launch(input, output); \ else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_us ) && LTO == GDF_DATE32 ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 86400000000> >::launch(input, output); \ else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_ns ) && LTO == GDF_DATE32 ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 86400000000000> >::launch(input, output); \ else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_s ) && LTO == GDF_DATE64 ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000> >::launch(input, output); \ else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_us ) && LTO == GDF_DATE64 ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000> >::launch(input, output); \ else if( ( LTFROM == GDF_TIMESTAMP && input->dtype_info.time_unit == TIME_UNIT_ns ) && LTO == GDF_DATE64 ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000000> >::launch(input, output); \ /* Handling only physical castings */ \ return UnaryOp<TFROM, TTO, DeviceCast<TFROM, TTO> >::launch(input, output); \ } // Castings functions where Timestamp is the destination type #define DEF_CAST_IMPL_TS(VFROM, VTO, TFROM, TTO, LTFROM, LTO) \ gdf_error gdf_cast_##VFROM##_to_##VTO(gdf_column *input, gdf_column *output, gdf_time_unit time_unit) { \ GDF_REQUIRE(input->dtype == LTFROM, GDF_UNSUPPORTED_DTYPE); \ \ cudaStream_t stream = 0; \ rmm_temp_allocator allocator(stream); \ \ output->dtype = LTO; \ output->dtype_info.time_unit = time_unit; \ if (input->valid && output->valid) { \ gdf_size_type num_chars_bitmask = gdf_get_num_chars_bitmask(input->size); \ thrust::copy(thrust::cuda::par(allocator).on(stream), input->valid, input->valid + num_chars_bitmask, output->valid); \ } \ \ /* Handling datetime logical castings */ \ if( LTFROM == GDF_DATE32 && ( LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_s ) ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 86400> >::launch(input, output); \ else if( LTFROM == GDF_DATE32 && ( LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_ms ) ) \ return UnaryOp<TFROM, 
TTO, UpCasting<TFROM, TTO, 86400000> >::launch(input, output); \ else if( LTFROM == GDF_DATE32 && ( LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_us ) ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 86400000000> >::launch(input, output); \ else if( LTFROM == GDF_DATE32 && ( LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_ns ) ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 86400000000000> >::launch(input, output); \ else if( LTFROM == GDF_DATE64 && LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_us) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000> >::launch(input, output); \ else if( LTFROM == GDF_DATE64 && LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_s) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000> >::launch(input, output); \ else if( LTFROM == GDF_DATE64 && LTO == GDF_TIMESTAMP && time_unit == TIME_UNIT_ns) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000000> >::launch(input, output); \ else if( LTFROM == GDF_TIMESTAMP && LTO == GDF_TIMESTAMP ) \ { \ if( input->dtype_info.time_unit == TIME_UNIT_s && time_unit == TIME_UNIT_ms ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_ms && time_unit == TIME_UNIT_s ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_s && time_unit == TIME_UNIT_us ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_us && time_unit == TIME_UNIT_s ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_s && time_unit == TIME_UNIT_ns ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000000000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_ns && time_unit == TIME_UNIT_s ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000000000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_us && time_unit == TIME_UNIT_ns ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_ns && time_unit == TIME_UNIT_us ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_ms && time_unit == TIME_UNIT_ns ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_ns && time_unit == TIME_UNIT_ms ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_us && time_unit == TIME_UNIT_ms ) \ return UnaryOp<TFROM, TTO, DownCasting<TFROM, TTO, 1000> >::launch(input, output); \ else if( input->dtype_info.time_unit == TIME_UNIT_ms && time_unit == TIME_UNIT_us ) \ return UnaryOp<TFROM, TTO, UpCasting<TFROM, TTO, 1000> >::launch(input, output); \ } \ /* Handling only physical castings */ \ return UnaryOp<TFROM, TTO, DeviceCast<TFROM, TTO> >::launch(input, output); \ } #define DEF_CAST_IMPL_TEMPLATE(ABREV, PHYSICAL_TYPE, LOGICAL_TYPE) \ DEF_CAST_OP(ABREV) \ DEF_CAST_IMPL(i8, ABREV, int8_t, PHYSICAL_TYPE, GDF_INT8, LOGICAL_TYPE) \ DEF_CAST_IMPL(i32, ABREV, int32_t, PHYSICAL_TYPE, GDF_INT32, LOGICAL_TYPE) \ DEF_CAST_IMPL(i64, ABREV, int64_t, PHYSICAL_TYPE, GDF_INT64, LOGICAL_TYPE) \ DEF_CAST_IMPL(f32, ABREV, float, PHYSICAL_TYPE, 
GDF_FLOAT32, LOGICAL_TYPE) \ DEF_CAST_IMPL(f64, ABREV, double, PHYSICAL_TYPE, GDF_FLOAT64, LOGICAL_TYPE) \ DEF_CAST_IMPL(date32, ABREV, int32_t, PHYSICAL_TYPE, GDF_DATE32, LOGICAL_TYPE) \ DEF_CAST_IMPL(date64, ABREV, int64_t, PHYSICAL_TYPE, GDF_DATE64, LOGICAL_TYPE) \ DEF_CAST_IMPL(timestamp, ABREV, int64_t, PHYSICAL_TYPE, GDF_TIMESTAMP, LOGICAL_TYPE) #define DEF_CAST_IMPL_TEMPLATE_TS(ABREV, PHYSICAL_TYPE, LOGICAL_TYPE) \ DEF_CAST_OP_TS(ABREV) \ DEF_CAST_IMPL_TS(i8, ABREV, int8_t, PHYSICAL_TYPE, GDF_INT8, LOGICAL_TYPE) \ DEF_CAST_IMPL_TS(i32, ABREV, int32_t, PHYSICAL_TYPE, GDF_INT32, LOGICAL_TYPE) \ DEF_CAST_IMPL_TS(i64, ABREV, int64_t, PHYSICAL_TYPE, GDF_INT64, LOGICAL_TYPE) \ DEF_CAST_IMPL_TS(f32, ABREV, float, PHYSICAL_TYPE, GDF_FLOAT32, LOGICAL_TYPE) \ DEF_CAST_IMPL_TS(f64, ABREV, double, PHYSICAL_TYPE, GDF_FLOAT64, LOGICAL_TYPE) \ DEF_CAST_IMPL_TS(date32, ABREV, int32_t, PHYSICAL_TYPE, GDF_DATE32, LOGICAL_TYPE) \ DEF_CAST_IMPL_TS(date64, ABREV, int64_t, PHYSICAL_TYPE, GDF_DATE64, LOGICAL_TYPE) \ DEF_CAST_IMPL_TS(timestamp, ABREV, int64_t, PHYSICAL_TYPE, GDF_TIMESTAMP, LOGICAL_TYPE) DEF_CAST_IMPL_TEMPLATE(f32, float, GDF_FLOAT32) DEF_CAST_IMPL_TEMPLATE(f64, double, GDF_FLOAT64) DEF_CAST_IMPL_TEMPLATE(i8, int8_t, GDF_INT8) DEF_CAST_IMPL_TEMPLATE(i32, int32_t, GDF_INT32) DEF_CAST_IMPL_TEMPLATE(i64, int64_t, GDF_INT64) DEF_CAST_IMPL_TEMPLATE(date32, int32_t, GDF_DATE32) DEF_CAST_IMPL_TEMPLATE(date64, int64_t, GDF_DATE64) DEF_CAST_IMPL_TEMPLATE_TS(timestamp, int64_t, GDF_TIMESTAMP)
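The DownCasting functor defined above adjusts negative values before the integer division so that, for example, a negative GDF_DATE64 value (milliseconds before the Unix epoch) maps to the correct GDF_DATE32 day. The snippet below is a host-side illustration of that arithmetic only, using the same 86400000 ms-per-day factor that DEF_CAST_IMPL uses; it is not part of libgdf.

#include <cstdint>
#include <cstdio>

// Same expression as the DownCasting functor, reproduced on the host for illustration.
static int32_t ms_to_days(int64_t ms) {
    const int64_t factor = 86400000;                                // milliseconds per day
    return (int32_t)((ms - (factor - 1) * (ms < 0)) / factor);      // rounds toward negative infinity
}

int main() {
    printf("%d\n", ms_to_days(129600000));   // 1.5 days after the epoch  -> day  1
    printf("%d\n", ms_to_days(-43200000));   // 0.5 days before the epoch -> day -1 (plain truncation would give 0)
    return 0;
}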
33c432b7751199f65484e98dbc908fb47fbc9e0d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "lodepng.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #define MAX_MSE 0.00001f __global__ void rectification(unsigned char* image, unsigned char* new_image, unsigned int size) { unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < size) { if (image[index] < 127) { new_image[index] = 127; } else { new_image[index] = image[index]; } } } __global__ void compression(unsigned char* image, unsigned char* new_image, unsigned width, unsigned int size, unsigned int blocks_per_row) { unsigned int index = threadIdx.x + (blockIdx.x % blocks_per_row) * blockDim.x; unsigned int new_index = (threadIdx.x + blockIdx.x * blockDim.x) + 4; if (index < size) { for (int i = 0; i < 4; i++) { // iterate through R, G, B, A unsigned int max = image[index]; if (image[index + 4 + i] > max) { // pixel to the right max = image[index + 4 + i]; } if (image[index + (4 * width) + i] > max) { // pixel below max = image[index + (4 * width) + i]; } if (image[index + (4 * width) + 4 + i] > max) { // pixel below & to the right max = image[index + (4 * width) + 4 + i]; } new_image[new_index + i] = max; } } } float get_MSE(char* input_filename_1, char* input_filename_2) { unsigned error1, error2; unsigned char* image1, * image2; unsigned width1, height1, width2, height2; error1 = lodepng_decode32_file(&image1, &width1, &height1, input_filename_1); error2 = lodepng_decode32_file(&image2, &width2, &height2, input_filename_2); if (error1) printf("error %u: %s\n", error1, lodepng_error_text(error1)); if (error2) printf("error %u: %s\n", error2, lodepng_error_text(error2)); if (width1 != width2) printf("images do not have same width\n"); if (height1 != height2) printf("images do not have same height\n"); // process image float im1, im2, diff, sum, MSE; sum = 0; for (int i = 0; i < width1 * height1; i++) { im1 = (float)image1[i]; im2 = (float)image2[i]; diff = im1 - im2; sum += diff * diff; } MSE = sqrt(sum) / (width1 * height1); free(image1); free(image2); return MSE; } int main() { // file definitions char* filename1 = "test.png"; // change these depending on what we're doing char* filename2 = "test_rectify_result.png"; // output for rectify char* filename3 = "test_pooling_result.png"; // output for pooling char* filename4 = "test_rectify_expected_result.png"; // filename for rectify comparison char* filename5 = "test_pooling_expected_result.png"; // filename for pooling comparison // load input image unsigned char* image; unsigned width, height; unsigned error = lodepng_decode32_file(&image, &width, &height, filename1); if (error) printf("error %u: %s\n", error, lodepng_error_text(error)); unsigned int size_image = width * height * 4 * sizeof(unsigned char); // height x width number of pixels, 4 layers (RGBA) for each pixel, 1 char for each value // define number of threads unsigned int thread_number = 1000; // number of threads per block we're using unsigned int thread_max = 1024; // hardware limit: maximum number of threads per block if (thread_number > thread_max) { // can't have more threads than the hardware limit thread_number = thread_max; } /********** Rectify Start ***********/ // allocate memory space on GPU unsigned char* cuda_image, * cuda_new_image; hipMalloc((void**)& cuda_image, size_image); hipMalloc((void**)& cuda_new_image, size_image); // CPU copies input data from CPU to GPU hipMemcpy(cuda_image, image, size_image, hipMemcpyHostToDevice); // 
figure out how many blocks we need for this task unsigned int num_blocks = (size_image + thread_number - 1) / thread_number; // call method on GPU hipLaunchKernelGGL(( rectification) , dim3(num_blocks), dim3(thread_number) , 0, 0, cuda_image, cuda_new_image, size_image); hipDeviceSynchronize(); // CPU copies input data from GPU back to CPU unsigned char* new_image = (unsigned char*)malloc(size_image); hipMemcpy(new_image, cuda_new_image, size_image, hipMemcpyDeviceToHost); hipFree(cuda_image); hipFree(cuda_new_image); lodepng_encode32_file(filename2, new_image, width, height); /********** Rectify End ***********/ /********** Pooling Start ***********/ // allocate memory space on GPU unsigned char* cuda_image_pool, * cuda_new_image_pool; hipMalloc((void**)& cuda_image_pool, size_image); hipMalloc((void**)& cuda_new_image_pool, size_image); // CPU copies input data from CPU to GPU hipMemcpy(cuda_image_pool, image, size_image, hipMemcpyHostToDevice); // maximum number of threads we can use is 1 per 16 pixel values // that's because we can use maximum 1 thread per 2x2 area, and each pixel in that 2x2 area has 4 values if (thread_number > ceil(size_image / 16)) { thread_number = ceil(size_image / 16); } // figure out how many blocks we need for this task num_blocks = ceil((size_image / thread_number) / 16) + 1; unsigned int blocks_per_row = ceil(width / thread_number); // call method on GPU hipLaunchKernelGGL(( compression) , dim3(num_blocks), dim3(thread_number) , 0, 0, cuda_image_pool, cuda_new_image_pool, width, size_image, blocks_per_row); hipDeviceSynchronize(); // CPU copies input data from GPU back to CPU unsigned char* new_image_pool = (unsigned char*)malloc(size_image); hipMemcpy(new_image_pool, cuda_new_image_pool, size_image, hipMemcpyDeviceToHost); hipFree(cuda_image_pool); hipFree(cuda_new_image_pool); lodepng_encode32_file(filename3, new_image_pool, width / 2, height / 2); /********** Pooling End ***********/ /********** Comparison Start ***********/ float MSE_rect = get_MSE(filename2, filename4); if (MSE_rect < MAX_MSE) { printf("Rectified image is equal to example (MSE = %f, MAX_MSE = %f)\n", MSE_rect, MAX_MSE); } else { printf("Rectified image is NOT equal to example (MSE = %f, MAX_MSE = %f)\n", MSE_rect, MAX_MSE); } float MSE_pool = get_MSE(filename3, filename5); if (MSE_pool < MAX_MSE) { printf("Pooled image is equal to example (MSE = %f, MAX_MSE = %f)\n", MSE_pool, MAX_MSE); } else { printf("Pooled image is NOT equal to example (MSE = %f, MAX_MSE = %f)\n", MSE_pool, MAX_MSE); } /********** Comparison End ***********/ free(image); free(new_image); free(new_image_pool); return 0; }
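The pooling comments in this file reason about one thread per 2x2 pixel block with four channel bytes per pixel. An alternative way to express that mapping is to give each thread one output pixel and derive the four source pixels from it. The kernel below is only an illustrative sketch of that indexing (it assumes even width and height and the same RGBA byte layout produced by lodepng_decode32_file); it is not the compression kernel this program launches.

// Illustrative 2x2 max-pooling kernel: one thread per output pixel (sketch, not the program's kernel).
__global__ void maxpool2x2_rgba_sketch(const unsigned char* in, unsigned char* out,
                                       unsigned in_width, unsigned out_width, unsigned out_height)
{
    unsigned idx = blockIdx.x * blockDim.x + threadIdx.x;      // output pixel index
    if (idx >= out_width * out_height) return;

    unsigned ox = idx % out_width, oy = idx / out_width;
    unsigned ix = 2 * ox, iy = 2 * oy;                         // top-left pixel of the 2x2 input block

    for (int c = 0; c < 4; ++c) {                              // R, G, B, A
        unsigned char m = 0;
        for (int dy = 0; dy < 2; ++dy)
            for (int dx = 0; dx < 2; ++dx) {
                unsigned char v = in[((iy + dy) * in_width + (ix + dx)) * 4 + c];
                if (v > m) m = v;
            }
        out[(oy * out_width + ox) * 4 + c] = m;
    }
}

Each thread here touches exactly 16 input bytes (2x2 pixels, 4 bytes each), which matches the one-thread-per-16-values budget the comment above computes.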
33c432b7751199f65484e98dbc908fb47fbc9e0d.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "lodepng.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #define MAX_MSE 0.00001f __global__ void rectification(unsigned char* image, unsigned char* new_image, unsigned int size) { unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < size) { if (image[index] < 127) { new_image[index] = 127; } else { new_image[index] = image[index]; } } } __global__ void compression(unsigned char* image, unsigned char* new_image, unsigned width, unsigned int size, unsigned int blocks_per_row) { unsigned int index = threadIdx.x + (blockIdx.x % blocks_per_row) * blockDim.x; unsigned int new_index = (threadIdx.x + blockIdx.x * blockDim.x) + 4; if (index < size) { for (int i = 0; i < 4; i++) { // iterate through R, G, B, A unsigned int max = image[index]; if (image[index + 4 + i] > max) { // pixel to the right max = image[index + 4 + i]; } if (image[index + (4 * width) + i] > max) { // pixel below max = image[index + (4 * width) + i]; } if (image[index + (4 * width) + 4 + i] > max) { // pixel below & to the right max = image[index + (4 * width) + 4 + i]; } new_image[new_index + i] = max; } } } float get_MSE(char* input_filename_1, char* input_filename_2) { unsigned error1, error2; unsigned char* image1, * image2; unsigned width1, height1, width2, height2; error1 = lodepng_decode32_file(&image1, &width1, &height1, input_filename_1); error2 = lodepng_decode32_file(&image2, &width2, &height2, input_filename_2); if (error1) printf("error %u: %s\n", error1, lodepng_error_text(error1)); if (error2) printf("error %u: %s\n", error2, lodepng_error_text(error2)); if (width1 != width2) printf("images do not have same width\n"); if (height1 != height2) printf("images do not have same height\n"); // process image float im1, im2, diff, sum, MSE; sum = 0; for (int i = 0; i < width1 * height1; i++) { im1 = (float)image1[i]; im2 = (float)image2[i]; diff = im1 - im2; sum += diff * diff; } MSE = sqrt(sum) / (width1 * height1); free(image1); free(image2); return MSE; } int main() { // file definitions char* filename1 = "test.png"; // change these depending on what we're doing char* filename2 = "test_rectify_result.png"; // output for rectify char* filename3 = "test_pooling_result.png"; // output for pooling char* filename4 = "test_rectify_expected_result.png"; // filename for rectify comparison char* filename5 = "test_pooling_expected_result.png"; // filename for pooling comparison // load input image unsigned char* image; unsigned width, height; unsigned error = lodepng_decode32_file(&image, &width, &height, filename1); if (error) printf("error %u: %s\n", error, lodepng_error_text(error)); unsigned int size_image = width * height * 4 * sizeof(unsigned char); // height x width number of pixels, 4 layers (RGBA) for each pixel, 1 char for each value // define number of threads unsigned int thread_number = 1000; // number of threads per block we're using unsigned int thread_max = 1024; // hardware limit: maximum number of threads per block if (thread_number > thread_max) { // can't have more threads than the hardware limit thread_number = thread_max; } /********** Rectify Start ***********/ // allocate memory space on GPU unsigned char* cuda_image, * cuda_new_image; cudaMalloc((void**)& cuda_image, size_image); cudaMalloc((void**)& cuda_new_image, size_image); // CPU copies input data from CPU to GPU cudaMemcpy(cuda_image, image, size_image, cudaMemcpyHostToDevice); // figure out how many blocks we need for this task unsigned int 
num_blocks = (size_image + thread_number - 1) / thread_number; // call method on GPU rectification <<< num_blocks, thread_number >>> (cuda_image, cuda_new_image, size_image); cudaDeviceSynchronize(); // CPU copies input data from GPU back to CPU unsigned char* new_image = (unsigned char*)malloc(size_image); cudaMemcpy(new_image, cuda_new_image, size_image, cudaMemcpyDeviceToHost); cudaFree(cuda_image); cudaFree(cuda_new_image); lodepng_encode32_file(filename2, new_image, width, height); /********** Rectify End ***********/ /********** Pooling Start ***********/ // allocate memory space on GPU unsigned char* cuda_image_pool, * cuda_new_image_pool; cudaMalloc((void**)& cuda_image_pool, size_image); cudaMalloc((void**)& cuda_new_image_pool, size_image); // CPU copies input data from CPU to GPU cudaMemcpy(cuda_image_pool, image, size_image, cudaMemcpyHostToDevice); // maximum number of threads we can use is 1 per 16 pixel values // that's because we can use maximum 1 thread per 2x2 area, and each pixel in that 2x2 area has 4 values if (thread_number > ceil(size_image / 16)) { thread_number = ceil(size_image / 16); } // figure out how many blocks we need for this task num_blocks = ceil((size_image / thread_number) / 16) + 1; unsigned int blocks_per_row = ceil(width / thread_number); // call method on GPU compression <<< num_blocks, thread_number >>> (cuda_image_pool, cuda_new_image_pool, width, size_image, blocks_per_row); cudaDeviceSynchronize(); // CPU copies input data from GPU back to CPU unsigned char* new_image_pool = (unsigned char*)malloc(size_image); cudaMemcpy(new_image_pool, cuda_new_image_pool, size_image, cudaMemcpyDeviceToHost); cudaFree(cuda_image_pool); cudaFree(cuda_new_image_pool); lodepng_encode32_file(filename3, new_image_pool, width / 2, height / 2); /********** Pooling End ***********/ /********** Comparison Start ***********/ float MSE_rect = get_MSE(filename2, filename4); if (MSE_rect < MAX_MSE) { printf("Rectified image is equal to example (MSE = %f, MAX_MSE = %f)\n", MSE_rect, MAX_MSE); } else { printf("Rectified image is NOT equal to example (MSE = %f, MAX_MSE = %f)\n", MSE_rect, MAX_MSE); } float MSE_pool = get_MSE(filename3, filename5); if (MSE_pool < MAX_MSE) { printf("Pooled image is equal to example (MSE = %f, MAX_MSE = %f)\n", MSE_pool, MAX_MSE); } else { printf("Pooled image is NOT equal to example (MSE = %f, MAX_MSE = %f)\n", MSE_pool, MAX_MSE); } /********** Comparison End ***********/ free(image); free(new_image); free(new_image_pool); return 0; }
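To make the launch arithmetic above concrete, here is a worked example with a hypothetical 640x480 input (the real dimensions come from lodepng at run time):

// Hypothetical 640x480 RGBA image, thread_number = 1000 (illustration only).
unsigned int size_image    = 640 * 480 * 4;                                    // 1,228,800 bytes
unsigned int thread_number = 1000;
unsigned int num_blocks    = (size_image + thread_number - 1) / thread_number; // 1229 blocks
// 1229 blocks * 1000 threads = 1,229,000 threads; the `index < size` guard in
// rectification() leaves the 200 surplus threads of the last block idle.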
e70404805bf551f7abc727875b671da213f2fa99.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "icp.h" #include <thrust/transform_reduce.h> #include <thrust/functional.h> namespace cuda_icp{ #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } __global__ void transform_pcd_cuda(Vec2f* model_pcd_ptr, uint32_t model_pcd_size, Mat3x3f trans){ uint32_t i = blockIdx.x*blockDim.x + threadIdx.x; if(i >= model_pcd_size) return; Vec2f& pcd = model_pcd_ptr[i]; float new_x = trans[0][0]*pcd.x + trans[0][1]*pcd.y + trans[0][2]; float new_y = trans[1][0]*pcd.x + trans[1][1]*pcd.y + trans[1][2]; pcd.x = new_x; pcd.y = new_y; } template<class Scene> RegistrationResult ICP2D_Point2Plane_cuda(device_vector_holder<Vec2f> &model_pcd, const Scene scene, const ICPConvergenceCriteria criteria){ RegistrationResult result; RegistrationResult backup; thrust::host_vector<float> A_host(9, 0); thrust::host_vector<float> b_host(3, 0); const uint32_t threadsPerBlock = 256; const uint32_t numBlocks = (model_pcd.size() + threadsPerBlock - 1)/threadsPerBlock; for(uint32_t iter=0; iter<= criteria.max_iteration_; iter++){ Vec11f Ab_tight = thrust::transform_reduce(thrust::hip::par.on(cudaStreamPerThread), model_pcd.begin_thr(), model_pcd.end_thr(), thrust__pcd2Ab<Scene>(scene), Vec11f::Zero(), thrust__plus()); hipStreamSynchronize(cudaStreamPerThread); backup = result; float& count = Ab_tight[10]; float& total_error = Ab_tight[9]; if(count == 0) return result; // avoid divid 0 result.fitness_ = float(count) / model_pcd.size(); result.inlier_rmse_ = std::sqrt(total_error / count); // last extra iter, just compute fitness & mse if(iter == criteria.max_iteration_) return result; if(std::abs(result.fitness_ - backup.fitness_) < criteria.relative_fitness_ && std::abs(result.inlier_rmse_ - backup.inlier_rmse_) < criteria.relative_rmse_){ return result; } for(int i=0; i<3; i++) b_host[i] = Ab_tight[6 + i]; int shift = 0; for(int y=0; y<3; y++){ for(int x=y; x<3; x++){ A_host[x + y*3] = Ab_tight[shift]; A_host[y + x*3] = Ab_tight[shift]; shift++; } } Mat3x3f extrinsic = eigen_slover_333(A_host.data(), b_host.data()); hipLaunchKernelGGL(( transform_pcd_cuda), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, model_pcd.data(), model_pcd.size(), extrinsic); hipStreamSynchronize(cudaStreamPerThread); result.transformation_ = extrinsic * result.transformation_; } // never arrive here return result; } template RegistrationResult ICP2D_Point2Plane_cuda(device_vector_holder<Vec2f> &model_pcd, const Scene_edge scene, const ICPConvergenceCriteria criteria); }
e70404805bf551f7abc727875b671da213f2fa99.cu
#include "icp.h" #include <thrust/transform_reduce.h> #include <thrust/functional.h> namespace cuda_icp{ #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } __global__ void transform_pcd_cuda(Vec2f* model_pcd_ptr, uint32_t model_pcd_size, Mat3x3f trans){ uint32_t i = blockIdx.x*blockDim.x + threadIdx.x; if(i >= model_pcd_size) return; Vec2f& pcd = model_pcd_ptr[i]; float new_x = trans[0][0]*pcd.x + trans[0][1]*pcd.y + trans[0][2]; float new_y = trans[1][0]*pcd.x + trans[1][1]*pcd.y + trans[1][2]; pcd.x = new_x; pcd.y = new_y; } template<class Scene> RegistrationResult ICP2D_Point2Plane_cuda(device_vector_holder<Vec2f> &model_pcd, const Scene scene, const ICPConvergenceCriteria criteria){ RegistrationResult result; RegistrationResult backup; thrust::host_vector<float> A_host(9, 0); thrust::host_vector<float> b_host(3, 0); const uint32_t threadsPerBlock = 256; const uint32_t numBlocks = (model_pcd.size() + threadsPerBlock - 1)/threadsPerBlock; for(uint32_t iter=0; iter<= criteria.max_iteration_; iter++){ Vec11f Ab_tight = thrust::transform_reduce(thrust::cuda::par.on(cudaStreamPerThread), model_pcd.begin_thr(), model_pcd.end_thr(), thrust__pcd2Ab<Scene>(scene), Vec11f::Zero(), thrust__plus()); cudaStreamSynchronize(cudaStreamPerThread); backup = result; float& count = Ab_tight[10]; float& total_error = Ab_tight[9]; if(count == 0) return result; // avoid divid 0 result.fitness_ = float(count) / model_pcd.size(); result.inlier_rmse_ = std::sqrt(total_error / count); // last extra iter, just compute fitness & mse if(iter == criteria.max_iteration_) return result; if(std::abs(result.fitness_ - backup.fitness_) < criteria.relative_fitness_ && std::abs(result.inlier_rmse_ - backup.inlier_rmse_) < criteria.relative_rmse_){ return result; } for(int i=0; i<3; i++) b_host[i] = Ab_tight[6 + i]; int shift = 0; for(int y=0; y<3; y++){ for(int x=y; x<3; x++){ A_host[x + y*3] = Ab_tight[shift]; A_host[y + x*3] = Ab_tight[shift]; shift++; } } Mat3x3f extrinsic = eigen_slover_333(A_host.data(), b_host.data()); transform_pcd_cuda<<<numBlocks, threadsPerBlock>>>(model_pcd.data(), model_pcd.size(), extrinsic); cudaStreamSynchronize(cudaStreamPerThread); result.transformation_ = extrinsic * result.transformation_; } // never arrive here return result; } template RegistrationResult ICP2D_Point2Plane_cuda(device_vector_holder<Vec2f> &model_pcd, const Scene_edge scene, const ICPConvergenceCriteria criteria); }
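The transform_reduce in this file folds every point's contribution into a single 11-float vector: six floats for the upper triangle of the symmetric 3x3 normal matrix A, three for b, one accumulated error term, and one correspondence count. The host-side sketch below replays the same unpacking loop to show how the six packed values become a full symmetric matrix; the numeric values are made up for illustration.

#include <cstdio>

// Packed layout assumed by the unpacking loop above:
//   Ab[0..5]  upper triangle of A, row by row: (0,0) (0,1) (0,2) (1,1) (1,2) (2,2)
//   Ab[6..8]  b
//   Ab[9]     accumulated error (feeds inlier_rmse_)
//   Ab[10]    number of valid correspondences (feeds fitness_)
int main() {
    float Ab[11] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 0.5f, 100};
    float A[9] = {0}, b[3] = {0};

    for (int i = 0; i < 3; ++i) b[i] = Ab[6 + i];

    int shift = 0;
    for (int y = 0; y < 3; ++y)
        for (int x = y; x < 3; ++x) {
            A[x + y * 3] = Ab[shift];   // upper triangle
            A[y + x * 3] = Ab[shift];   // mirrored lower triangle
            ++shift;
        }

    // Reconstructed symmetric matrix:
    //   1 2 3
    //   2 4 5
    //   3 5 6
    for (int y = 0; y < 3; ++y)
        printf("%g %g %g\n", A[y * 3], A[y * 3 + 1], A[y * 3 + 2]);
    return 0;
}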
07a4174b1bfc29a877b97eaea568a60ed20ce611.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <limits> #include "catch.hpp" #include "update.h" extern const int nThreads {128}; TEST_CASE("update parameters", "[updateParams]") { int n_W {8}; float *W = new float[n_W] {1.2f, -1.8f, -0.7f, 0.14f, 0.3f, 0.85f, -0.56f, 0.34f}; float *dW = new float[n_W] {0.179496767558757f, 0.0f, 0.115390779144915f, 0.0f, 0.230781558289831f, 0.0f, -0.409820425931346f, 0.0f}; int n_B {3}; float *B = new float[n_B] {0.1f, 0.2f, 0.67f}; float *dB = new float[n_B] {0.256423953655368f, 0.0f, -0.457899917241728f}; float *d_W, *d_dW, *d_B, *d_dB; hipMalloc(&d_W, n_W * sizeof(float)); hipMalloc(&d_dW, n_W * sizeof(float)); hipMalloc(&d_B, n_B * sizeof(float)); hipMalloc(&d_dB, n_B * sizeof(float)); hipMemcpy(d_W, W, n_W * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_dW, dW, n_W * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_B, B, n_B * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_dB, dB, n_B * sizeof(float), hipMemcpyHostToDevice); float learn_rate {0.1f}; float *correct_B = new float[n_B] {0.0743576046344633f, 0.2f, 0.715789991724173f}; float *correct_W = new float[n_W] {1.182050323244124f, -1.8f, -0.711539077914491f, 0.14f, 0.276921844171017f, 0.85f, -0.519017957406865f, 0.34f}; int nBlocks {0}; SECTION("update B") { nBlocks = (n_B + nThreads - 1) / nThreads; hipLaunchKernelGGL(( UpdateB), dim3(nBlocks), dim3(nThreads), 0, 0, n_B, d_B, d_dB, learn_rate); hipMemcpy(B, d_B, n_B * sizeof(float), hipMemcpyDeviceToHost); for (int i = 0; i < n_B; ++i) CHECK(B[i] == Approx(correct_B[i]).epsilon(std::numeric_limits<float>::epsilon())); } SECTION("update W") { nBlocks = (n_W + nThreads - 1) / nThreads; hipLaunchKernelGGL(( UpdateW), dim3(nBlocks), dim3(nThreads), 0, 0, n_W, d_W, d_dW, learn_rate); hipMemcpy(W, d_W, n_W * sizeof(float), hipMemcpyDeviceToHost); for (int i = 0; i < n_W; ++i) CHECK(W[i] == Approx(correct_W[i]).epsilon(std::numeric_limits<float>::epsilon())); } SECTION("update W and B") { UpdateParameters(n_W, d_W, d_dW, n_B, d_B, d_dB, learn_rate); hipMemcpy(B, d_B, n_B * sizeof(float), hipMemcpyDeviceToHost); for (int i = 0; i < n_B; ++i) CHECK(B[i] == Approx(correct_B[i]).epsilon(std::numeric_limits<float>::epsilon())); hipMemcpy(W, d_W, n_W * sizeof(float), hipMemcpyDeviceToHost); for (int i = 0; i < n_W; ++i) CHECK(W[i] == Approx(correct_W[i]).epsilon(std::numeric_limits<float>::epsilon())); } hipFree(d_W); hipFree(d_dW); hipFree(d_B); hipFree(d_dB); delete[] W; delete[] dW; delete[] B; delete[] dB; delete[] correct_W; delete[] correct_B; }
07a4174b1bfc29a877b97eaea568a60ed20ce611.cu
#include <limits> #include "catch.hpp" #include "update.h" extern const int nThreads {128}; TEST_CASE("update parameters", "[updateParams]") { int n_W {8}; float *W = new float[n_W] {1.2f, -1.8f, -0.7f, 0.14f, 0.3f, 0.85f, -0.56f, 0.34f}; float *dW = new float[n_W] {0.179496767558757f, 0.0f, 0.115390779144915f, 0.0f, 0.230781558289831f, 0.0f, -0.409820425931346f, 0.0f}; int n_B {3}; float *B = new float[n_B] {0.1f, 0.2f, 0.67f}; float *dB = new float[n_B] {0.256423953655368f, 0.0f, -0.457899917241728f}; float *d_W, *d_dW, *d_B, *d_dB; cudaMalloc(&d_W, n_W * sizeof(float)); cudaMalloc(&d_dW, n_W * sizeof(float)); cudaMalloc(&d_B, n_B * sizeof(float)); cudaMalloc(&d_dB, n_B * sizeof(float)); cudaMemcpy(d_W, W, n_W * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_dW, dW, n_W * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_B, B, n_B * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_dB, dB, n_B * sizeof(float), cudaMemcpyHostToDevice); float learn_rate {0.1f}; float *correct_B = new float[n_B] {0.0743576046344633f, 0.2f, 0.715789991724173f}; float *correct_W = new float[n_W] {1.182050323244124f, -1.8f, -0.711539077914491f, 0.14f, 0.276921844171017f, 0.85f, -0.519017957406865f, 0.34f}; int nBlocks {0}; SECTION("update B") { nBlocks = (n_B + nThreads - 1) / nThreads; UpdateB<<<nBlocks, nThreads>>>(n_B, d_B, d_dB, learn_rate); cudaMemcpy(B, d_B, n_B * sizeof(float), cudaMemcpyDeviceToHost); for (int i = 0; i < n_B; ++i) CHECK(B[i] == Approx(correct_B[i]).epsilon(std::numeric_limits<float>::epsilon())); } SECTION("update W") { nBlocks = (n_W + nThreads - 1) / nThreads; UpdateW<<<nBlocks, nThreads>>>(n_W, d_W, d_dW, learn_rate); cudaMemcpy(W, d_W, n_W * sizeof(float), cudaMemcpyDeviceToHost); for (int i = 0; i < n_W; ++i) CHECK(W[i] == Approx(correct_W[i]).epsilon(std::numeric_limits<float>::epsilon())); } SECTION("update W and B") { UpdateParameters(n_W, d_W, d_dW, n_B, d_B, d_dB, learn_rate); cudaMemcpy(B, d_B, n_B * sizeof(float), cudaMemcpyDeviceToHost); for (int i = 0; i < n_B; ++i) CHECK(B[i] == Approx(correct_B[i]).epsilon(std::numeric_limits<float>::epsilon())); cudaMemcpy(W, d_W, n_W * sizeof(float), cudaMemcpyDeviceToHost); for (int i = 0; i < n_W; ++i) CHECK(W[i] == Approx(correct_W[i]).epsilon(std::numeric_limits<float>::epsilon())); } cudaFree(d_W); cudaFree(d_dW); cudaFree(d_B); cudaFree(d_dB); delete[] W; delete[] dW; delete[] B; delete[] dB; delete[] correct_W; delete[] correct_B; }
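The expected values in this test are plain gradient-descent updates, parameter minus learning rate times gradient: 0.1 - 0.1 * 0.256423953655368 ≈ 0.0743576046344633 reproduces correct_B[0], and 1.2 - 0.1 * 0.179496767558757 ≈ 1.182050323244124 reproduces correct_W[0]. UpdateB and UpdateW themselves are declared in update.h and are not shown here, so the kernel below is only a sketch of the rule the test exercises, not the project's implementation.

// Hypothetical kernel illustrating the update rule implied by the expected values above.
__global__ void sgd_update_sketch(int n, float* param, const float* grad, float learn_rate)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        param[i] -= learn_rate * grad[i];   // param = param - learn_rate * grad
}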
a329f8d9f82fd15c02e8e1920046f51656db92ff.hip
// !!! This is a file automatically generated by hipify!!! #ifndef __STEREOVISIONAPP_DRIVER_CU #define __STEREOVISIONAPP_DRIVER_CU #include <cstdlib> #include <iostream> #include <string> #include <fstream> #include <sstream> #include <cfloat> #include <cmath> #include "./tests/stereoVisionAppData.h" #include "../codegenInput/UpdateApp.cuh" #include "../codegenInput/EnergyApp.cuh" #define ifL "/home/research/e.flores/StereoVision/tutorial-tsukuba-imL.pgm" #define ifR "/home/research/e.flores/StereoVision/tutorial-tsukuba-imR.pgm" #define of "/home/research/e.flores/StereoVision/tskuba-output.pgm" #define convergence -0.001 #define numLabels 16 #define windowSize 5 #define lambda 20 #define truncate 2 int width, height, numPixels; void importPGM(string filePath, unsigned char** pixels); void setDC(int x, int y, int label, float val, float** DCArray); float getDC(int x, int y, int label, float** DCArray); void run_stereoVisionApp() { unsigned char* leftPixels, * rightPixels; // import the images importPGM(ifL, &leftPixels); importPGM(ifR, &rightPixels); // calculate borders int leftBorder = (numLabels - 1) + (windowSize / 2); int rightBorder = width - (windowSize / 2) - 1; int topBorder = windowSize / 2; int bottomBorder = height - (windowSize / 2) - 1; // allocate memory float* currMsg; hipMallocManaged(&currMsg, 4*numLabels*numPixels*sizeof(float)); currMsg = new float[4*numLabels*numPixels]; float* prevMsg; hipMallocManaged(&prevMsg, 4*numLabels*numPixels*sizeof(float)); prevMsg = new float[4*numLabels*numPixels]; float* belief; hipMallocManaged(&belief, numLabels*numPixels*sizeof(float)); belief = new float[numLabels*numPixels]; float* DC; hipMallocManaged(&DC, numLabels*numPixels*sizeof(float)); DC = new float[numLabels*numPixels]; // memoize data costs for (int x = leftBorder; x <= rightBorder; x++) { for (int y = topBorder; y <= bottomBorder; y++) { for (int k = 0; k < numLabels; k++) { int sum = 0; int absDiff; for (int i = x - windowSize / 2; i <= x + windowSize / 2; i++) { for (int j = y - windowSize / 2; j <= y + windowSize / 2; j++) { absDiff = abs((int) leftPixels[j*width+i] - (int) rightPixels[(j-k)*width+i]); sum += absDiff; } } setDC(x, y, k, (float) sum, &DC); } } } // print out all DC vectors for debugging for (int x = leftBorder; x <= rightBorder; x++) { for (int y = topBorder; y <= bottomBorder; y++) { if (x%100==0 && y%100==0) { std::cout << "(" << x << "," << y << ")\t["; for (int k = 0; k < numLabels; k++) { std::cout << getDC(x, y, k, &DC) << ", "; }//End of for std::cout << "]\n"; }//End of if }//End of for }//End of for // set up buffers int* inBufferData; //Would I need to create another inBufferData variable? //Would I need to create another InputBuffer? //If how would I deal with therons calculations (updated belief) output to use for my input of my calcualtions (energy calcuations) hipMallocManaged(&inBufferData, numPixels*sizeof(int)); Mercator::InputBuffer<int>* inBuffer = new Mercator::InputBuffer<int>(inBufferData, numPixels); Mercator::OutputBuffer<int>* outBuffer = new Mercator::OutputBuffer<int>(numPixels); Mercator::OutputBuffer<int>* outBuffer2 = new Mercator::OutputBuffer<int >(OUT_BUFFER_CAPACITY1); //How to know how much of a out buffer capacity I need? //do I say inBufferData?
for (int x = leftBorder; x <= rightBorder; x++) { for (int y = topBorder; y <= bottomBorder; y++) { inBuffer->add(y*width+x); } } // create app object UpdateApp* updateApp = new UpdateApp(); //create energyApp object EnergyApp* energyApp = new EnergyApp(); // set app-level data StereoVisionAppData* appData = new StereoVisionAppData( currMsg, prevMsg, belief, DC, numLabels, lambda, truncate, width, height ); updateApp->set_userData(appData); energyApp-> set_userData(energyApp); // associate buffers with nodes updateApp->sourceNode->set_inBuffer(inBuffer); updateApp->sinkNode->set_outBuffer(outBuffer); updateApp->run(); energyApp->run(); std::cout << "App run completed" << endl; // synthApp0->run(); // std::cout << "SynthApp0 finished.\n" ; // // print contents of output buffer // #if PRINT_OUTPUT_BUFFERS // std::cout << " Output buffers: \n" ; // int* outData1 = outBuffer1->get_data(); // printf("SynthApp0, OutBuffer1 (%p):\n", outBuffer1); // for(int i=0; i < outBuffer1->size(); ++i) // printf("[%d]: %d\n", i, outData1[i]); // int* outData2 = outBuffer2->get_data(); // printf("SynthApp0, OutBuffer2 (%p):\n", outBuffer2); // for(int i=0; i < outBuffer2->size(); ++i) // printf("[%d]: %d\n", i, outData2[i]); // int* outData3 = outBuffer3->get_data(); // printf("SynthApp0, OutBuffer3 (%p):\n", outBuffer3); // for(int i=0; i < outBuffer3->size(); ++i) // printf("[%d]: %d\n", i, outData3[i]); // #endif // print contents of output buffer // // cleanup // hipFree(inBufferData); // hipFree(synthApp0); } void importPGM(string filePath, unsigned char** pixels) { string line; ifstream file(filePath.c_str()); if (file.is_open()) { getline(file, line); line = line.substr(0, 2); if (line.compare("P2") != 0) { std::cout << "Incorrect file type" << endl; exit(EXIT_FAILURE); } getline(file, line); stringstream ss(line); char c; int ifWidth, ifHeight; ss >> c; if (c == '#') { // ignore this line of the file getline(file, line); ss.str(line); ss >> ifWidth >> ifHeight; } else { // that char is actually the width ifWidth = (int) c; ss >> ifHeight; } width = ifWidth; height = ifHeight; std::cout << "Width: " << width << " Height: " << height << endl; numPixels = width * height; std::cout << "numPixels: " << numPixels << endl; *pixels = new unsigned char[numPixels]; getline(file, line); // read maximum gray value int pixel; int count = 0; while (!file.eof()) { getline(file, line); ss.clear(); ss.str(line); while (ss >> pixel) { (*pixels)[count] = pixel; count++; } } std::cout << "Import successful" << endl << endl; } else { std::cout << "Import failed" << endl; exit(EXIT_FAILURE); } } void setDC(int x, int y, int label, float val, float** DCArray) { (*DCArray)[(numPixels*label) + (y*width+x)] = val; } float getDC(int x, int y, int label, float** DCArray) { return (*DCArray)[(numPixels*label) + (y*width+x)]; } #endif
a329f8d9f82fd15c02e8e1920046f51656db92ff.cu
#ifndef __STEREOVISIONAPP_DRIVER_CU #define __STEREOVISIONAPP_DRIVER_CU #include <cstdlib> #include <iostream> #include <string> #include <fstream> #include <sstream> #include <cfloat> #include <cmath> #include "./tests/stereoVisionAppData.h" #include "../codegenInput/UpdateApp.cuh" #include "../codegenInput/EnergyApp.cuh" #define ifL "/home/research/e.flores/StereoVision/tutorial-tsukuba-imL.pgm" #define ifR "/home/research/e.flores/StereoVision/tutorial-tsukuba-imR.pgm" #define of "/home/research/e.flores/StereoVision/tskuba-output.pgm" #define convergence -0.001 #define numLabels 16 #define windowSize 5 #define lambda 20 #define truncate 2 int width, height, numPixels; void importPGM(string filePath, unsigned char** pixels); void setDC(int x, int y, int label, float val, float** DCArray); float getDC(int x, int y, int label, float** DCArray); void run_stereoVisionApp() { unsigned char* leftPixels, * rightPixels; // import the images importPGM(ifL, &leftPixels); importPGM(ifR, &rightPixels); // calculate borders int leftBorder = (numLabels - 1) + (windowSize / 2); int rightBorder = width - (windowSize / 2) - 1; int topBorder = windowSize / 2; int bottomBorder = height - (windowSize / 2) - 1; // allocate memory float* currMsg; cudaMallocManaged(&currMsg, 4*numLabels*numPixels*sizeof(float)); currMsg = new float[4*numLabels*numPixels]; float* prevMsg; cudaMallocManaged(&prevMsg, 4*numLabels*numPixels*sizeof(float)); prevMsg = new float[4*numLabels*numPixels]; float* belief; cudaMallocManaged(&belief, numLabels*numPixels*sizeof(float)); belief = new float[numLabels*numPixels]; float* DC; cudaMallocManaged(&DC, numLabels*numPixels*sizeof(float)); DC = new float[numLabels*numPixels]; // memoize data costs for (int x = leftBorder; x <= rightBorder; x++) { for (int y = topBorder; y <= bottomBorder; y++) { for (int k = 0; k < numLabels; k++) { int sum = 0; int absDiff; for (int i = x - windowSize / 2; i <= x + windowSize / 2; i++) { for (int j = y - windowSize / 2; j <= y + windowSize / 2; j++) { absDiff = abs((int) leftPixels[j*width+i] - (int) rightPixels[(j-k)*width+i]); sum += absDiff; } } setDC(x, y, k, (float) sum, &DC); } } } // print out all DC vectors for debugging for (int x = leftBorder; x <= rightBorder; x++) { for (int y = topBorder; y <= bottomBorder; y++) { if (x%100==0 && y%100==0) { std::cout << "(" << x << "," << y << ")\t["; for (int k = 0; k < numLabels; k++) { std::cout << getDC(x, y, k, &DC) << ", "; }//End of for std::cout << "]\n"; }//End of if }//End of for }//End of for // set up buffers int* inBufferData; //Would I need to create another inBufferData variable? //Would I need to create another InputBuffer? //If how would I deal with therons calculations (updated belief) output to use for my input of my calcualtions (energy calcuations) cudaMallocManaged(&inBufferData, numPixels*sizeof(int)); Mercator::InputBuffer<int>* inBuffer = new Mercator::InputBuffer<int>(inBufferData, numPixels); Mercator::OutputBuffer<int>* outBuffer = new Mercator::OutputBuffer<int>(numPixels); Mercator::OutputBuffer<int>* outBuffer2 = new Mercator::OutputBuffer<int >(OUT_BUFFER_CAPACITY1); //How to know how much of a out buffer capacity I need? //do I say inBufferData?
for (int x = leftBorder; x <= rightBorder; x++) { for (int y = topBorder; y <= bottomBorder; y++) { inBuffer->add(y*width+x); } } // create app object UpdateApp* updateApp = new UpdateApp(); //create energyApp object EnergyApp* energyApp = new EnergyApp(); // set app-level data StereoVisionAppData* appData = new StereoVisionAppData( currMsg, prevMsg, belief, DC, numLabels, lambda, truncate, width, height ); updateApp->set_userData(appData); energyApp-> set_userData(energyApp); // associate buffers with nodes updateApp->sourceNode->set_inBuffer(inBuffer); updateApp->sinkNode->set_outBuffer(outBuffer); updateApp->run(); energyApp->run(); std::cout << "App run completed" << endl; // synthApp0->run(); // std::cout << "SynthApp0 finished.\n" ; // // print contents of output buffer // #if PRINT_OUTPUT_BUFFERS // std::cout << " Output buffers: \n" ; // int* outData1 = outBuffer1->get_data(); // printf("SynthApp0, OutBuffer1 (%p):\n", outBuffer1); // for(int i=0; i < outBuffer1->size(); ++i) // printf("[%d]: %d\n", i, outData1[i]); // int* outData2 = outBuffer2->get_data(); // printf("SynthApp0, OutBuffer2 (%p):\n", outBuffer2); // for(int i=0; i < outBuffer2->size(); ++i) // printf("[%d]: %d\n", i, outData2[i]); // int* outData3 = outBuffer3->get_data(); // printf("SynthApp0, OutBuffer3 (%p):\n", outBuffer3); // for(int i=0; i < outBuffer3->size(); ++i) // printf("[%d]: %d\n", i, outData3[i]); // #endif // print contents of output buffer // // cleanup // cudaFree(inBufferData); // cudaFree(synthApp0); } void importPGM(string filePath, unsigned char** pixels) { string line; ifstream file(filePath.c_str()); if (file.is_open()) { getline(file, line); line = line.substr(0, 2); if (line.compare("P2") != 0) { std::cout << "Incorrect file type" << endl; exit(EXIT_FAILURE); } getline(file, line); stringstream ss(line); char c; int ifWidth, ifHeight; ss >> c; if (c == '#') { // ignore this line of the file getline(file, line); ss.str(line); ss >> ifWidth >> ifHeight; } else { // that char is actually the width ifWidth = (int) c; ss >> ifHeight; } width = ifWidth; height = ifHeight; std::cout << "Width: " << width << " Height: " << height << endl; numPixels = width * height; std::cout << "numPixels: " << numPixels << endl; *pixels = new unsigned char[numPixels]; getline(file, line); // read maximum gray value int pixel; int count = 0; while (!file.eof()) { getline(file, line); ss.clear(); ss.str(line); while (ss >> pixel) { (*pixels)[count] = pixel; count++; } } std::cout << "Import successful" << endl << endl; } else { std::cout << "Import failed" << endl; exit(EXIT_FAILURE); } } void setDC(int x, int y, int label, float val, float** DCArray) { (*DCArray)[(numPixels*label) + (y*width+x)] = val; } float getDC(int x, int y, int label, float** DCArray) { return (*DCArray)[(numPixels*label) + (y*width+x)]; } #endif
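setDC and getDC in this driver address the data-cost volume as numLabels planes of width*height floats: index = numPixels*label + (y*width + x), so every pixel's cost for one disparity label sits in a contiguous plane. A tiny host-side sketch of that addressing, with hypothetical dimensions (the real ones are read from the PGM header at run time):

#include <cstdio>

int main() {
    const int width = 384, height = 288, numLabels = 16;   // hypothetical sizes, for illustration only
    const int numPixels = width * height;                  // 110592

    // Same formula as setDC/getDC: label-major planes, row-major pixels within a plane.
    auto dc_index = [&](int x, int y, int label) {
        return numPixels * label + (y * width + x);
    };

    printf("%d planes of %d floats each\n", numLabels, numPixels);
    printf("(x=10, y=20, label=0) -> %d\n", dc_index(10, 20, 0));   // 7690
    printf("(x=10, y=20, label=1) -> %d\n", dc_index(10, 20, 1));   // 118282 (one full plane further)
    return 0;
}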
ffcd1faacc19a6efb16cd5618a5617a514ba658c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "symbols/NaN.cuh" __global__ void dropoutRuntimeKernel ( int batchSize, int numberRows, int numberEntriesPerInstance, int numberIterations, float keepProbability, float* input, float* result) { int indexInstance = blockIdx.x; int indexColumn = blockIdx.y; int startInstanceWithinBatch = indexInstance * numberEntriesPerInstance; int startColumnWithinInstance = indexColumn * numberRows; int startRowWithinColumn = threadIdx.x * numberIterations; int firstEntryWithinBatch = startInstanceWithinBatch + startColumnWithinInstance + startRowWithinColumn; int startNextColumn = startInstanceWithinBatch + startColumnWithinInstance + numberRows; if(firstEntryWithinBatch < startNextColumn) { int lastEntryWithinBatch = min(firstEntryWithinBatch + numberIterations, startNextColumn); if(indexInstance < batchSize) { for(int indexEntry = firstEntryWithinBatch; indexEntry < lastEntryWithinBatch; indexEntry++) { result[indexEntry] = keepProbability * input[indexEntry]; } } else { setToNaN(result, firstEntryWithinBatch, lastEntryWithinBatch); } } }
ffcd1faacc19a6efb16cd5618a5617a514ba658c.cu
#include "symbols/NaN.cuh" __global__ void dropoutRuntimeKernel ( int batchSize, int numberRows, int numberEntriesPerInstance, int numberIterations, float keepProbability, float* input, float* result) { int indexInstance = blockIdx.x; int indexColumn = blockIdx.y; int startInstanceWithinBatch = indexInstance * numberEntriesPerInstance; int startColumnWithinInstance = indexColumn * numberRows; int startRowWithinColumn = threadIdx.x * numberIterations; int firstEntryWithinBatch = startInstanceWithinBatch + startColumnWithinInstance + startRowWithinColumn; int startNextColumn = startInstanceWithinBatch + startColumnWithinInstance + numberRows; if(firstEntryWithinBatch < startNextColumn) { int lastEntryWithinBatch = min(firstEntryWithinBatch + numberIterations, startNextColumn); if(indexInstance < batchSize) { for(int indexEntry = firstEntryWithinBatch; indexEntry < lastEntryWithinBatch; indexEntry++) { result[indexEntry] = keepProbability * input[indexEntry]; } } else { setToNaN(result, firstEntryWithinBatch, lastEntryWithinBatch); } } }
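dropoutRuntimeKernel derives its work assignment entirely from the launch shape: blockIdx.x selects the instance, blockIdx.y the column, and each thread covers numberIterations consecutive rows; instances beyond batchSize are filled with NaN. The host-side call is not part of this file, so the configuration below is an assumed sketch that is merely consistent with that indexing, not the library's actual call site.

// Hypothetical launch consistent with the kernel's indexing (assumption; the real host code is not shown).
void launch_dropout_runtime_sketch(
    int maximumBatchSize, int batchSize, int numberRows, int numberColumns,
    int numberIterations, float keepProbability, float* input, float* result)
{
    int numberEntriesPerInstance = numberRows * numberColumns;
    int threadsPerColumn = (numberRows + numberIterations - 1) / numberIterations;

    dim3 grid(maximumBatchSize, numberColumns);   // one block per (instance, column) pair
    dim3 block(threadsPerColumn);                 // each thread scales numberIterations rows

    dropoutRuntimeKernel<<<grid, block>>>(
        batchSize, numberRows, numberEntriesPerInstance,
        numberIterations, keepProbability, input, result);
}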
68e4a426b9368348f3258daa0a33d7e2bf6f2a9c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void per_row_kernel(int *in, int N) //1D grid, 2D block { int cno; int id = threadIdx.x*blockDim.y + threadIdx.y + blockIdx.x*(blockDim.x * blockDim.y); int rno = id; if(rno<N) { for(cno=0;cno<N;++cno) //i is column number { if(rno>cno) { in[cno*N + rno] = in[rno*N + cno]; in[rno*N + cno] = 0; } } } } __global__ void per_element_kernel(int *in, int N) //3D grid, 1D block { int id = blockIdx.z*blockDim.x*gridDim.x*gridDim.y + blockIdx.y*blockDim.x*gridDim.x + blockIdx.x*blockDim.x + threadIdx.x; int cno,rno; cno = id%N; rno = id/N; if(rno<N) { if(rno > cno) { in[cno*N + rno] = in[rno*N + cno]; in[rno*N + cno] = 0; } } } __global__ void per_element_kernel_2D(int *in, int N) //2D grid, 2D block { int id = blockIdx.y*blockDim.x*blockDim.y*gridDim.x + blockIdx.x*blockDim.x*blockDim.y + threadIdx.y*blockDim.x + threadIdx.x; int cno,rno; cno = id%N; rno = id/N; if(rno<N) { if(rno > cno) { in[cno*N + rno] = in[rno*N + cno]; in[rno*N + cno] = 0; } } }
68e4a426b9368348f3258daa0a33d7e2bf6f2a9c.cu
__global__ void per_row_kernel(int *in, int N) //1D grid, 2D block { int cno; int id = threadIdx.x*blockDim.y + threadIdx.y + blockIdx.x*(blockDim.x * blockDim.y); int rno = id; if(rno<N) { for(cno=0;cno<N;++cno) //i is column number { if(rno>cno) { in[cno*N + rno] = in[rno*N + cno]; in[rno*N + cno] = 0; } } } } __global__ void per_element_kernel(int *in, int N) //3D grid, 1D block { int id = blockIdx.z*blockDim.x*gridDim.x*gridDim.y + blockIdx.y*blockDim.x*gridDim.x + blockIdx.x*blockDim.x + threadIdx.x; int cno,rno; cno = id%N; rno = id/N; if(rno<N) { if(rno > cno) { in[cno*N + rno] = in[rno*N + cno]; in[rno*N + cno] = 0; } } } __global__ void per_element_kernel_2D(int *in, int N) //2D grid, 2D block { int id = blockIdx.y*blockDim.x*blockDim.y*gridDim.x + blockIdx.x*blockDim.x*blockDim.y + threadIdx.y*blockDim.x + threadIdx.x; int cno,rno; cno = id%N; rno = id/N; if(rno<N) { if(rno > cno) { in[cno*N + rno] = in[rno*N + cno]; in[rno*N + cno] = 0; } } }
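All three kernels above guard with rno < N and derive row/column from a flattened id, so any grid/block factorization whose total thread count covers the matrix works. The launch below is one possible configuration for per_element_kernel; it is a sketch consistent with the index arithmetic, not taken from the original host code (which is not included here).

// Hypothetical launch for per_element_kernel: cover all N*N elements with a grid of 1D blocks.
void launch_per_element_sketch(int* d_in, int N)
{
    int threads = 256;
    long long total = (long long)N * N;                     // one thread per matrix element
    int blocks = (int)((total + threads - 1) / threads);

    // The kernel flattens blockIdx.{x,y,z} into one id, so a 1D grid is a valid special case.
    per_element_kernel<<<dim3(blocks, 1, 1), dim3(threads)>>>(d_in, N);
}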
ce58fe20a9cf26b86a2f70ea6d7bf939bacc1709.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "incr_kernel.cuh" #include "incr_wrapper.h" __global__ void incr_kernel( double* a, const double* b, size_t n ) { for( size_t i = 0; i < n; ++i ) { a[i] = a[i] + b[i]; } } void incr( double* a, const double* b, size_t n ) { double* _a; double* _b; hipMalloc( &_a, n * sizeof( double ) ); hipMalloc( &_b, n * sizeof( double ) ); hipMemcpy( _a, a, n * sizeof( double ), hipMemcpyHostToDevice ); hipMemcpy( _b, b, n * sizeof( double ), hipMemcpyHostToDevice ); //! @note This is essentially serial, with *loads* of memcpy overhead. hipLaunchKernelGGL(( incr_kernel), dim3(1), dim3(1), 0, 0, _a, _b, n ); hipMemcpy( a, _a, n * sizeof( double ), hipMemcpyDeviceToHost ); hipFree( _a ); hipFree( _b ); }
ce58fe20a9cf26b86a2f70ea6d7bf939bacc1709.cu
#include "incr_kernel.cuh" #include "incr_wrapper.h" __global__ void incr_kernel( double* a, const double* b, size_t n ) { for( size_t i = 0; i < n; ++i ) { a[i] = a[i] + b[i]; } } void incr( double* a, const double* b, size_t n ) { double* _a; double* _b; cudaMalloc( &_a, n * sizeof( double ) ); cudaMalloc( &_b, n * sizeof( double ) ); cudaMemcpy( _a, a, n * sizeof( double ), cudaMemcpyHostToDevice ); cudaMemcpy( _b, b, n * sizeof( double ), cudaMemcpyHostToDevice ); //! @note This is essentially serial, with *loads* of memcpy overhead. incr_kernel<<<1, 1>>>( _a, _b, n ); cudaMemcpy( a, _a, n * sizeof( double ), cudaMemcpyDeviceToHost ); cudaFree( _a ); cudaFree( _b ); }
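The @note in incr above points out that incr_kernel runs its whole loop on a single thread of a single block, so the device work is serial. For contrast, a one-element-per-thread variant would look like the sketch below; it is only an illustrative alternative, not part of this file.

// Illustrative parallel alternative to incr_kernel: one element per thread.
__global__ void incr_kernel_parallel(double* a, const double* b, size_t n)
{
    size_t i = blockIdx.x * (size_t)blockDim.x + threadIdx.x;
    if (i < n)
        a[i] = a[i] + b[i];
}

// A matching launch inside incr() could be, for example:
//   int threads = 256;
//   int blocks  = (int)((n + threads - 1) / threads);
//   incr_kernel_parallel<<<blocks, threads>>>(_a, _b, n);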